[apple/xnu.git] / bsd / net / pf.c (blob 5529d8056cf39b798a45afdf5d5e99e7a60cf246)
1 /*
2 * Copyright (c) 2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit 7c8016ea91f7b68950cf41729c92dd8e3e423ba7 $ */
30 /* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/kernel.h>
75 #include <sys/time.h>
76 #include <sys/proc.h>
77 #include <sys/random.h>
78 #include <sys/mcache.h>
79
80 #include <libkern/crypto/md5.h>
81 #include <libkern/libkern.h>
82
83 #include <mach/thread_act.h>
84
85 #include <net/if.h>
86 #include <net/if_types.h>
87 #include <net/bpf.h>
88 #include <net/route.h>
89
90 #include <netinet/in.h>
91 #include <netinet/in_var.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/ip.h>
94 #include <netinet/ip_var.h>
95 #include <netinet/tcp.h>
96 #include <netinet/tcp_seq.h>
97 #include <netinet/udp.h>
98 #include <netinet/ip_icmp.h>
99 #include <netinet/in_pcb.h>
100 #include <netinet/tcp_timer.h>
101 #include <netinet/tcp_var.h>
102 #include <netinet/tcp_fsm.h>
103 #include <netinet/udp_var.h>
104 #include <netinet/icmp_var.h>
105 #include <net/if_ether.h>
106 #include <net/ethernet.h>
107
108 #include <net/pfvar.h>
109 #include <net/if_pflog.h>
110
111 #if NPFSYNC
112 #include <net/if_pfsync.h>
113 #endif /* NPFSYNC */
114
115 #if INET6
116 #include <netinet/ip6.h>
117 #include <netinet6/in6_pcb.h>
118 #include <netinet6/ip6_var.h>
119 #include <netinet/icmp6.h>
120 #include <netinet6/nd6.h>
121 #endif /* INET6 */
122
123 #ifndef NO_APPLE_EXTENSIONS
124 #define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0))
125 #else
126 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
127 #endif
128
129 /* XXX: should be in header somewhere */
130 #define satosin(sa) ((struct sockaddr_in *)(sa))
131 #define sintosa(sin) ((struct sockaddr *)(sin))
132
133 /*
134 * On Mac OS X, the rtableid value is treated as the interface scope
135 * value that is equivalent to the interface index used for scoped
136 * routing. A valid scope value is anything but IFSCOPE_NONE (0),
137 * as per definition of ifindex which is a positive, non-zero number.
138 * The other BSDs treat a negative rtableid value as invalid, hence
139 * the test against INT_MAX to handle userland apps which initialize
140 * the field with a negative number.
141 */
142 #define PF_RTABLEID_IS_VALID(r) \
143 ((r) > IFSCOPE_NONE && (r) <= INT_MAX)
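
/*
 * Illustrative sketch (editorial addition, not in the original source):
 * a caller validating a rule's rtableid before treating it as an
 * interface scope might look like the following, where `ifscope' is a
 * hypothetical local:
 *
 *	unsigned int ifscope;
 *
 *	if (PF_RTABLEID_IS_VALID(r->rtableid))
 *		ifscope = r->rtableid;	// usable as a scoped-routing ifindex
 *	else
 *		ifscope = IFSCOPE_NONE;	// zero or negative: not scoped
 */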
144
145 /*
146 * Global variables
147 */
148 lck_mtx_t *pf_lock;
149 lck_rw_t *pf_perim_lock;
150
151 /* state tables */
152 struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
153 struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;
154
155 struct pf_palist pf_pabuf;
156 struct pf_status pf_status;
157
158 #if ALTQ
159 struct pf_altqqueue pf_altqs[2];
160 struct pf_altqqueue *pf_altqs_active;
161 struct pf_altqqueue *pf_altqs_inactive;
162 u_int32_t ticket_altqs_active;
163 u_int32_t ticket_altqs_inactive;
164 int altqs_inactive_open;
165 #endif /* ALTQ */
166 u_int32_t ticket_pabuf;
167
168 static MD5_CTX pf_tcp_secret_ctx;
169 static u_char pf_tcp_secret[16];
170 static int pf_tcp_secret_init;
171 static int pf_tcp_iss_off;
172
173 static struct pf_anchor_stackframe {
174 struct pf_ruleset *rs;
175 struct pf_rule *r;
176 struct pf_anchor_node *parent;
177 struct pf_anchor *child;
178 } pf_anchor_stack[64];
179
180 struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
181 struct pool pf_state_pl, pf_state_key_pl;
182 #if ALTQ
183 struct pool pf_altq_pl;
184 #endif /* ALTQ */
185
186 #ifndef NO_APPLE_EXTENSIONS
187 typedef void (*hook_fn_t)(void *);
188
189 struct hook_desc {
190 TAILQ_ENTRY(hook_desc) hd_list;
191 hook_fn_t hd_fn;
192 void *hd_arg;
193 };
194
195 #define HOOK_REMOVE 0x01
196 #define HOOK_FREE 0x02
197 #define HOOK_ABORT 0x04
198
199 static void *hook_establish(struct hook_desc_head *, int,
200 hook_fn_t, void *);
201 static void hook_runloop(struct hook_desc_head *, int flags);
202
203 struct pool pf_app_state_pl;
204 static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
205 static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
206 u_int8_t);
207 #endif
208
209 static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
210
211 static void pf_init_threshold(struct pf_threshold *, u_int32_t,
212 u_int32_t);
213 static void pf_add_threshold(struct pf_threshold *);
214 static int pf_check_threshold(struct pf_threshold *);
215
216 static void pf_change_ap(int, struct mbuf *, struct pf_addr *,
217 u_int16_t *, u_int16_t *, u_int16_t *,
218 struct pf_addr *, u_int16_t, u_int8_t, sa_family_t);
219 static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
220 struct tcphdr *, struct pf_state_peer *);
221 #if INET6
222 static void pf_change_a6(struct pf_addr *, u_int16_t *,
223 struct pf_addr *, u_int8_t);
224 #endif /* INET6 */
225 static void pf_change_icmp(struct pf_addr *, u_int16_t *,
226 struct pf_addr *, struct pf_addr *, u_int16_t,
227 u_int16_t *, u_int16_t *, u_int16_t *,
228 u_int16_t *, u_int8_t, sa_family_t);
229 static void pf_send_tcp(const struct pf_rule *, sa_family_t,
230 const struct pf_addr *, const struct pf_addr *,
231 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
232 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
233 u_int16_t, struct ether_header *, struct ifnet *);
234 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
235 sa_family_t, struct pf_rule *);
236 #ifndef NO_APPLE_EXTENSIONS
237 static struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
238 int, int, struct pfi_kif *, struct pf_addr *,
239 union pf_state_xport *, struct pf_addr *,
240 union pf_state_xport *, int);
241 static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
242 struct mbuf *, int, int, struct pfi_kif *,
243 struct pf_src_node **, struct pf_addr *,
244 union pf_state_xport *, struct pf_addr *,
245 union pf_state_xport *, struct pf_addr *,
246 union pf_state_xport *);
247 #else
248 struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
249 int, int, struct pfi_kif *,
250 struct pf_addr *, u_int16_t, struct pf_addr *,
251 u_int16_t, int);
252 struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
253 int, int, struct pfi_kif *, struct pf_src_node **,
254 struct pf_addr *, u_int16_t,
255 struct pf_addr *, u_int16_t,
256 struct pf_addr *, u_int16_t *);
257 #endif
258 static void pf_attach_state(struct pf_state_key *,
259 struct pf_state *, int);
260 static void pf_detach_state(struct pf_state *, int);
261 static u_int32_t pf_tcp_iss(struct pf_pdesc *);
262 static int pf_test_rule(struct pf_rule **, struct pf_state **,
263 int, struct pfi_kif *, struct mbuf *, int,
264 void *, struct pf_pdesc *, struct pf_rule **,
265 struct pf_ruleset **, struct ifqueue *);
266 static int pf_test_fragment(struct pf_rule **, int,
267 struct pfi_kif *, struct mbuf *, void *,
268 struct pf_pdesc *, struct pf_rule **,
269 struct pf_ruleset **);
270 static int pf_test_state_tcp(struct pf_state **, int,
271 struct pfi_kif *, struct mbuf *, int,
272 void *, struct pf_pdesc *, u_short *);
273 static int pf_test_state_udp(struct pf_state **, int,
274 struct pfi_kif *, struct mbuf *, int,
275 void *, struct pf_pdesc *, u_short *);
276 static int pf_test_state_icmp(struct pf_state **, int,
277 struct pfi_kif *, struct mbuf *, int,
278 void *, struct pf_pdesc *, u_short *);
279 static int pf_test_state_other(struct pf_state **, int,
280 struct pfi_kif *, struct pf_pdesc *);
281 static int pf_match_tag(struct mbuf *, struct pf_rule *,
282 struct pf_mtag *, int *);
283 static void pf_step_into_anchor(int *, struct pf_ruleset **, int,
284 struct pf_rule **, struct pf_rule **, int *);
285 static int pf_step_out_of_anchor(int *, struct pf_ruleset **,
286 int, struct pf_rule **, struct pf_rule **,
287 int *);
288 static void pf_hash(struct pf_addr *, struct pf_addr *,
289 struct pf_poolhashkey *, sa_family_t);
290 static int pf_map_addr(u_int8_t, struct pf_rule *,
291 struct pf_addr *, struct pf_addr *,
292 struct pf_addr *, struct pf_src_node **);
293 #ifndef NO_APPLE_EXTENSIONS
294 static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
295 struct pf_rule *, struct pf_addr *,
296 union pf_state_xport *, struct pf_addr *,
297 union pf_state_xport *, struct pf_addr *,
298 union pf_state_xport *, struct pf_src_node **);
299 #else
300 int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
301 struct pf_addr *, struct pf_addr *, u_int16_t,
302 struct pf_addr *, u_int16_t *, u_int16_t, u_int16_t,
303 struct pf_src_node **);
304 #endif
305 static void pf_route(struct mbuf **, struct pf_rule *, int,
306 struct ifnet *, struct pf_state *,
307 struct pf_pdesc *);
308 #if INET6
309 static void pf_route6(struct mbuf **, struct pf_rule *, int,
310 struct ifnet *, struct pf_state *,
311 struct pf_pdesc *);
312 #endif /* INET6 */
313 static u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
314 sa_family_t);
315 static u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
316 sa_family_t);
317 static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
318 u_int16_t);
319 static void pf_set_rt_ifp(struct pf_state *,
320 struct pf_addr *);
321 static int pf_check_proto_cksum(struct mbuf *, int, int,
322 u_int8_t, sa_family_t);
323 static int pf_addr_wrap_neq(struct pf_addr_wrap *,
324 struct pf_addr_wrap *);
325 static struct pf_state *pf_find_state(struct pfi_kif *,
326 struct pf_state_key_cmp *, u_int);
327 static int pf_src_connlimit(struct pf_state **);
328 static void pf_stateins_err(const char *, struct pf_state *,
329 struct pfi_kif *);
330 static int pf_check_congestion(struct ifqueue *);
331
332 #ifndef NO_APPLE_EXTENSIONS
333 #if 0
334 static const char *pf_pptp_ctrl_type_name(u_int16_t code);
335 #endif
336 static void pf_pptp_handler(struct pf_state *, int, int,
337 struct pf_pdesc *, struct pfi_kif *);
338 static void pf_pptp_unlink(struct pf_state *);
339 static int pf_test_state_grev1(struct pf_state **, int,
340 struct pfi_kif *, int, struct pf_pdesc *);
341 static int pf_ike_compare(struct pf_app_state *,
342 struct pf_app_state *);
343 static int pf_test_state_esp(struct pf_state **, int,
344 struct pfi_kif *, int, struct pf_pdesc *);
345 #endif
346
347 extern struct pool pfr_ktable_pl;
348 extern struct pool pfr_kentry_pl;
349 extern int path_mtu_discovery;
350
351 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
352 { &pf_state_pl, PFSTATE_HIWAT },
353 { &pf_app_state_pl, PFAPPSTATE_HIWAT },
354 { &pf_src_tree_pl, PFSNODE_HIWAT },
355 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
356 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
357 { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
358 };
359
360 #ifndef NO_APPLE_EXTENSIONS
361 struct mbuf *
362 pf_lazy_makewritable(struct pf_pdesc *pd, struct mbuf *m, int len)
363 {
364 if (pd->lmw < 0)
365 return (0);
366
367 VERIFY(m == pd->mp);
368
369 if (len > pd->lmw) {
370 if (m_makewritable(&m, 0, len, M_DONTWAIT))
371 len = -1;
372 pd->lmw = len;
373 if (len >= 0 && m != pd->mp) {
374 pd->mp = m;
375
376 switch (pd->af) {
377 case AF_INET: {
378 struct ip *h = mtod(m, struct ip *);
379 pd->src = (struct pf_addr *)&h->ip_src;
380 pd->dst = (struct pf_addr *)&h->ip_dst;
381 pd->ip_sum = &h->ip_sum;
382 break;
383 }
384 #if INET6
385 case AF_INET6: {
386 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
387 pd->src = (struct pf_addr *)&h->ip6_src;
388 pd->dst = (struct pf_addr *)&h->ip6_dst;
389 break;
390 }
391 #endif /* INET6 */
392 }
393 }
394 }
395
396 return (len < 0 ? 0 : m);
397 }
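
/*
 * Usage sketch (editorial addition): callers of pf_lazy_makewritable()
 * are expected to replace their mbuf pointer with the return value and
 * bail out on NULL, since the chain (and the header pointers cached in
 * pd) may have been replaced; the length argument here is illustrative:
 *
 *	m = pf_lazy_makewritable(pd, m, off + sizeof (struct tcphdr));
 *	if (m == NULL)
 *		return (PF_DROP);	// could not make the region writable
 */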
398
399 static const int *
400 pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
401 int direction, int *action)
402 {
403 if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
404 *action = PF_DROP;
405 return (action);
406 }
407
408 if (direction == PF_OUT &&
409 (((*state)->rule.ptr->rt == PF_ROUTETO &&
410 (*state)->rule.ptr->direction == PF_OUT) ||
411 ((*state)->rule.ptr->rt == PF_REPLYTO &&
412 (*state)->rule.ptr->direction == PF_IN)) &&
413 (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
414 *action = PF_PASS;
415 return (action);
416 }
417
418 return (0);
419 }
420
421 #define STATE_LOOKUP() \
422 do { \
423 int action; \
424 *state = pf_find_state(kif, &key, direction); \
425 if (pf_state_lookup_aux(state, kif, direction, &action)) \
426 return (action); \
427 } while (0)
428
429 #define STATE_ADDR_TRANSLATE(sk) \
430 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
431 ((sk)->af == AF_INET6 && \
432 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
433 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
434 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))
435
436 #define STATE_TRANSLATE(sk) \
437 (STATE_ADDR_TRANSLATE(sk) || \
438 (sk)->lan.xport.port != (sk)->gwy.xport.port)
439
440 #define STATE_GRE_TRANSLATE(sk) \
441 (STATE_ADDR_TRANSLATE(sk) || \
442 (sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)
443
444 #else
445 #define STATE_LOOKUP() \
446 do { \
447 *state = pf_find_state(kif, &key, direction); \
448 if (*state == NULL || (*state)->timeout == PFTM_PURGE) \
449 return (PF_DROP); \
450 if (direction == PF_OUT && \
451 (((*state)->rule.ptr->rt == PF_ROUTETO && \
452 (*state)->rule.ptr->direction == PF_OUT) || \
453 ((*state)->rule.ptr->rt == PF_REPLYTO && \
454 (*state)->rule.ptr->direction == PF_IN)) && \
455 (*state)->rt_kif != NULL && \
456 (*state)->rt_kif != kif) \
457 return (PF_PASS); \
458 } while (0)
459
460 #define STATE_TRANSLATE(sk) \
461 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
462 ((sk)->af == AF_INET6 && \
463 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
464 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
465 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])) || \
466 (sk)->lan.port != (sk)->gwy.port
467 #endif
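
/*
 * Usage sketch (editorial addition): STATE_LOOKUP() is a pf idiom, not a
 * function call; it assumes `kif', `key', `direction' and `state' are
 * already in scope in the calling pf_test_state_*() handler and may
 * return from that handler on its own:
 *
 *	struct pf_state_key_cmp key;
 *
 *	key.af = pd->af;
 *	key.proto = pd->proto;
 *	// ... fill in the lan/ext/gwy endpoints from the packet ...
 *	STATE_LOOKUP();		// may return (PF_DROP) or (PF_PASS)
 */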
468
469 #define BOUND_IFACE(r, k) \
470 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
471
472 #define STATE_INC_COUNTERS(s) \
473 do { \
474 s->rule.ptr->states++; \
475 VERIFY(s->rule.ptr->states != 0); \
476 if (s->anchor.ptr != NULL) { \
477 s->anchor.ptr->states++; \
478 VERIFY(s->anchor.ptr->states != 0); \
479 } \
480 if (s->nat_rule.ptr != NULL) { \
481 s->nat_rule.ptr->states++; \
482 VERIFY(s->nat_rule.ptr->states != 0); \
483 } \
484 } while (0)
485
486 #define STATE_DEC_COUNTERS(s) \
487 do { \
488 if (s->nat_rule.ptr != NULL) { \
489 VERIFY(s->nat_rule.ptr->states > 0); \
490 s->nat_rule.ptr->states--; \
491 } \
492 if (s->anchor.ptr != NULL) { \
493 VERIFY(s->anchor.ptr->states > 0); \
494 s->anchor.ptr->states--; \
495 } \
496 VERIFY(s->rule.ptr->states > 0); \
497 s->rule.ptr->states--; \
498 } while (0)
499
500 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
501 static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
502 struct pf_state_key *);
503 static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
504 struct pf_state_key *);
505 static __inline int pf_state_compare_id(struct pf_state *,
506 struct pf_state *);
507
508 struct pf_src_tree tree_src_tracking;
509
510 struct pf_state_tree_id tree_id;
511 struct pf_state_queue state_list;
512
513 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
514 RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
515 entry_lan_ext, pf_state_compare_lan_ext);
516 RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
517 entry_ext_gwy, pf_state_compare_ext_gwy);
518 RB_GENERATE(pf_state_tree_id, pf_state,
519 entry_id, pf_state_compare_id);
520
521 #define PF_DT_SKIP_LANEXT 0x01
522 #define PF_DT_SKIP_EXTGWY 0x02
523
524 #ifndef NO_APPLE_EXTENSIONS
525 static const u_int16_t PF_PPTP_PORT = 1723;
526 static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
527
528 struct pf_pptp_hdr {
529 u_int16_t length;
530 u_int16_t type;
531 u_int32_t magic;
532 };
533
534 struct pf_pptp_ctrl_hdr {
535 u_int16_t type;
536 u_int16_t reserved_0;
537 };
538
539 struct pf_pptp_ctrl_generic {
540 u_int16_t data[0];
541 };
542
543 #define PF_PPTP_CTRL_TYPE_START_REQ 1
544 struct pf_pptp_ctrl_start_req {
545 u_int16_t protocol_version;
546 u_int16_t reserved_1;
547 u_int32_t framing_capabilities;
548 u_int32_t bearer_capabilities;
549 u_int16_t maximum_channels;
550 u_int16_t firmware_revision;
551 u_int8_t host_name[64];
552 u_int8_t vendor_string[64];
553 };
554
555 #define PF_PPTP_CTRL_TYPE_START_RPY 2
556 struct pf_pptp_ctrl_start_rpy {
557 u_int16_t protocol_version;
558 u_int8_t result_code;
559 u_int8_t error_code;
560 u_int32_t framing_capabilities;
561 u_int32_t bearer_capabilities;
562 u_int16_t maximum_channels;
563 u_int16_t firmware_revision;
564 u_int8_t host_name[64];
565 u_int8_t vendor_string[64];
566 };
567
568 #define PF_PPTP_CTRL_TYPE_STOP_REQ 3
569 struct pf_pptp_ctrl_stop_req {
570 u_int8_t reason;
571 u_int8_t reserved_1;
572 u_int16_t reserved_2;
573 };
574
575 #define PF_PPTP_CTRL_TYPE_STOP_RPY 4
576 struct pf_pptp_ctrl_stop_rpy {
577 u_int8_t reason;
578 u_int8_t error_code;
579 u_int16_t reserved_1;
580 };
581
582 #define PF_PPTP_CTRL_TYPE_ECHO_REQ 5
583 struct pf_pptp_ctrl_echo_req {
584 u_int32_t identifier;
585 };
586
587 #define PF_PPTP_CTRL_TYPE_ECHO_RPY 6
588 struct pf_pptp_ctrl_echo_rpy {
589 u_int32_t identifier;
590 u_int8_t result_code;
591 u_int8_t error_code;
592 u_int16_t reserved_1;
593 };
594
595 #define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7
596 struct pf_pptp_ctrl_call_out_req {
597 u_int16_t call_id;
598 u_int16_t call_sernum;
599 u_int32_t min_bps;
600 u_int32_t bearer_type;
601 u_int32_t framing_type;
602 u_int16_t rxwindow_size;
603 u_int16_t proc_delay;
604 u_int8_t phone_num[64];
605 u_int8_t sub_addr[64];
606 };
607
608 #define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8
609 struct pf_pptp_ctrl_call_out_rpy {
610 u_int16_t call_id;
611 u_int16_t peer_call_id;
612 u_int8_t result_code;
613 u_int8_t error_code;
614 u_int16_t cause_code;
615 u_int32_t connect_speed;
616 u_int16_t rxwindow_size;
617 u_int16_t proc_delay;
618 u_int32_t phy_channel_id;
619 };
620
621 #define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9
622 struct pf_pptp_ctrl_call_in_1st {
623 u_int16_t call_id;
624 u_int16_t call_sernum;
625 u_int32_t bearer_type;
626 u_int32_t phy_channel_id;
627 u_int16_t dialed_number_len;
628 u_int16_t dialing_number_len;
629 u_int8_t dialed_num[64];
630 u_int8_t dialing_num[64];
631 u_int8_t sub_addr[64];
632 };
633
634 #define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10
635 struct pf_pptp_ctrl_call_in_2nd {
636 u_int16_t call_id;
637 u_int16_t peer_call_id;
638 u_int8_t result_code;
639 u_int8_t error_code;
640 u_int16_t rxwindow_size;
641 u_int16_t txdelay;
642 u_int16_t reserved_1;
643 };
644
645 #define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11
646 struct pf_pptp_ctrl_call_in_3rd {
647 u_int16_t call_id;
648 u_int16_t reserved_1;
649 u_int32_t connect_speed;
650 u_int16_t rxwindow_size;
651 u_int16_t txdelay;
652 u_int32_t framing_type;
653 };
654
655 #define PF_PPTP_CTRL_TYPE_CALL_CLR 12
656 struct pf_pptp_ctrl_call_clr {
657 u_int16_t call_id;
658 u_int16_t reserved_1;
659 };
660
661 #define PF_PPTP_CTRL_TYPE_CALL_DISC 13
662 struct pf_pptp_ctrl_call_disc {
663 u_int16_t call_id;
664 u_int8_t result_code;
665 u_int8_t error_code;
666 u_int16_t cause_code;
667 u_int16_t reserved_1;
668 u_int8_t statistics[128];
669 };
670
671 #define PF_PPTP_CTRL_TYPE_ERROR 14
672 struct pf_pptp_ctrl_error {
673 u_int16_t peer_call_id;
674 u_int16_t reserved_1;
675 u_int32_t crc_errors;
676 u_int32_t fr_errors;
677 u_int32_t hw_errors;
678 u_int32_t buf_errors;
679 u_int32_t tim_errors;
680 u_int32_t align_errors;
681 };
682
683 #define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15
684 struct pf_pptp_ctrl_set_linkinfo {
685 u_int16_t peer_call_id;
686 u_int16_t reserved_1;
687 u_int32_t tx_accm;
688 u_int32_t rx_accm;
689 };
690
691 #if 0
692 static const char *pf_pptp_ctrl_type_name(u_int16_t code)
693 {
694 code = ntohs(code);
695
696 if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
697 code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
698 static char reserved[] = "reserved-00";
699
700 sprintf(&reserved[9], "%02x", code);
701 return (reserved);
702 } else {
703 static const char *name[] = {
704 "start_req", "start_rpy", "stop_req", "stop_rpy",
705 "echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
706 "call_in_1st", "call_in_2nd", "call_in_3rd",
707 "call_clr", "call_disc", "error", "set_linkinfo"
708 };
709
710 return (name[code - 1]);
711 }
712 }

713 #endif
714
715 static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
716 sizeof (struct pf_pptp_hdr) +
717 sizeof (struct pf_pptp_ctrl_hdr) +
718 MIN(sizeof (struct pf_pptp_ctrl_start_req),
719 MIN(sizeof (struct pf_pptp_ctrl_start_rpy),
720 MIN(sizeof (struct pf_pptp_ctrl_stop_req),
721 MIN(sizeof (struct pf_pptp_ctrl_stop_rpy),
722 MIN(sizeof (struct pf_pptp_ctrl_echo_req),
723 MIN(sizeof (struct pf_pptp_ctrl_echo_rpy),
724 MIN(sizeof (struct pf_pptp_ctrl_call_out_req),
725 MIN(sizeof (struct pf_pptp_ctrl_call_out_rpy),
726 MIN(sizeof (struct pf_pptp_ctrl_call_in_1st),
727 MIN(sizeof (struct pf_pptp_ctrl_call_in_2nd),
728 MIN(sizeof (struct pf_pptp_ctrl_call_in_3rd),
729 MIN(sizeof (struct pf_pptp_ctrl_call_clr),
730 MIN(sizeof (struct pf_pptp_ctrl_call_disc),
731 MIN(sizeof (struct pf_pptp_ctrl_error),
732 sizeof (struct pf_pptp_ctrl_set_linkinfo)
733 ))))))))))))));
734
735 union pf_pptp_ctrl_msg_union {
736 struct pf_pptp_ctrl_start_req start_req;
737 struct pf_pptp_ctrl_start_rpy start_rpy;
738 struct pf_pptp_ctrl_stop_req stop_req;
739 struct pf_pptp_ctrl_stop_rpy stop_rpy;
740 struct pf_pptp_ctrl_echo_req echo_req;
741 struct pf_pptp_ctrl_echo_rpy echo_rpy;
742 struct pf_pptp_ctrl_call_out_req call_out_req;
743 struct pf_pptp_ctrl_call_out_rpy call_out_rpy;
744 struct pf_pptp_ctrl_call_in_1st call_in_1st;
745 struct pf_pptp_ctrl_call_in_2nd call_in_2nd;
746 struct pf_pptp_ctrl_call_in_3rd call_in_3rd;
747 struct pf_pptp_ctrl_call_clr call_clr;
748 struct pf_pptp_ctrl_call_disc call_disc;
749 struct pf_pptp_ctrl_error error;
750 struct pf_pptp_ctrl_set_linkinfo set_linkinfo;
751 u_int8_t data[0];
752 };
753
754 struct pf_pptp_ctrl_msg {
755 struct pf_pptp_hdr hdr;
756 struct pf_pptp_ctrl_hdr ctrl;
757 union pf_pptp_ctrl_msg_union msg;
758 };
759
760 #define PF_GRE_FLAG_CHECKSUM_PRESENT 0x8000
761 #define PF_GRE_FLAG_VERSION_MASK 0x0007
762 #define PF_GRE_PPP_ETHERTYPE 0x880B
763
764 struct pf_grev1_hdr {
765 u_int16_t flags;
766 u_int16_t protocol_type;
767 u_int16_t payload_length;
768 u_int16_t call_id;
769 /*
770 u_int32_t seqno;
771 u_int32_t ackno;
772 */
773 };
774
775 static const u_int16_t PF_IKE_PORT = 500;
776
777 struct pf_ike_hdr {
778 u_int64_t initiator_cookie, responder_cookie;
779 u_int8_t next_payload, version, exchange_type, flags;
780 u_int32_t message_id, length;
781 };
782
783 #define PF_IKE_PACKET_MINSIZE (sizeof (struct pf_ike_hdr))
784
785 #define PF_IKEv1_EXCHTYPE_BASE 1
786 #define PF_IKEv1_EXCHTYPE_ID_PROTECT 2
787 #define PF_IKEv1_EXCHTYPE_AUTH_ONLY 3
788 #define PF_IKEv1_EXCHTYPE_AGGRESSIVE 4
789 #define PF_IKEv1_EXCHTYPE_INFORMATIONAL 5
790 #define PF_IKEv2_EXCHTYPE_SA_INIT 34
791 #define PF_IKEv2_EXCHTYPE_AUTH 35
792 #define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36
793 #define PF_IKEv2_EXCHTYPE_INFORMATIONAL 37
794
795 #define PF_IKEv1_FLAG_E 0x01
796 #define PF_IKEv1_FLAG_C 0x02
797 #define PF_IKEv1_FLAG_A 0x04
798 #define PF_IKEv2_FLAG_I 0x08
799 #define PF_IKEv2_FLAG_V 0x10
800 #define PF_IKEv2_FLAG_R 0x20
801
802 struct pf_esp_hdr {
803 u_int32_t spi;
804 u_int32_t seqno;
805 u_int8_t payload[];
806 };
807 #endif
808
809 static __inline int
810 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
811 {
812 int diff;
813
814 if (a->rule.ptr > b->rule.ptr)
815 return (1);
816 if (a->rule.ptr < b->rule.ptr)
817 return (-1);
818 if ((diff = a->af - b->af) != 0)
819 return (diff);
820 switch (a->af) {
821 #if INET
822 case AF_INET:
823 if (a->addr.addr32[0] > b->addr.addr32[0])
824 return (1);
825 if (a->addr.addr32[0] < b->addr.addr32[0])
826 return (-1);
827 break;
828 #endif /* INET */
829 #if INET6
830 case AF_INET6:
831 if (a->addr.addr32[3] > b->addr.addr32[3])
832 return (1);
833 if (a->addr.addr32[3] < b->addr.addr32[3])
834 return (-1);
835 if (a->addr.addr32[2] > b->addr.addr32[2])
836 return (1);
837 if (a->addr.addr32[2] < b->addr.addr32[2])
838 return (-1);
839 if (a->addr.addr32[1] > b->addr.addr32[1])
840 return (1);
841 if (a->addr.addr32[1] < b->addr.addr32[1])
842 return (-1);
843 if (a->addr.addr32[0] > b->addr.addr32[0])
844 return (1);
845 if (a->addr.addr32[0] < b->addr.addr32[0])
846 return (-1);
847 break;
848 #endif /* INET6 */
849 }
850 return (0);
851 }
852
853 static __inline int
854 pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b)
855 {
856 int diff;
857 #ifndef NO_APPLE_EXTENSIONS
858 int extfilter;
859 #endif
860
861 if ((diff = a->proto - b->proto) != 0)
862 return (diff);
863 if ((diff = a->af - b->af) != 0)
864 return (diff);
865
866 #ifndef NO_APPLE_EXTENSIONS
867 extfilter = PF_EXTFILTER_APD;
868
869 switch (a->proto) {
870 case IPPROTO_ICMP:
871 case IPPROTO_ICMPV6:
872 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
873 return (diff);
874 break;
875
876 case IPPROTO_TCP:
877 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
878 return (diff);
879 if ((diff = a->ext.xport.port - b->ext.xport.port) != 0)
880 return (diff);
881 break;
882
883 case IPPROTO_UDP:
884 if ((diff = a->proto_variant - b->proto_variant))
885 return (diff);
886 extfilter = a->proto_variant;
887 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
888 return (diff);
889 if ((extfilter < PF_EXTFILTER_AD) &&
890 (diff = a->ext.xport.port - b->ext.xport.port) != 0)
891 return (diff);
892 break;
893
894 case IPPROTO_GRE:
895 if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
896 a->proto_variant == b->proto_variant) {
897 if (!!(diff = a->ext.xport.call_id -
898 b->ext.xport.call_id))
899 return (diff);
900 }
901 break;
902
903 case IPPROTO_ESP:
904 if (!!(diff = a->ext.xport.spi - b->ext.xport.spi))
905 return (diff);
906 break;
907
908 default:
909 break;
910 }
911 #endif
912
913 switch (a->af) {
914 #if INET
915 case AF_INET:
916 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
917 return (1);
918 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
919 return (-1);
920 #ifndef NO_APPLE_EXTENSIONS
921 if (extfilter < PF_EXTFILTER_EI) {
922 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
923 return (1);
924 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
925 return (-1);
926 }
927 #else
928 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
929 return (1);
930 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
931 return (-1);
932 #endif
933 break;
934 #endif /* INET */
935 #if INET6
936 case AF_INET6:
937 #ifndef NO_APPLE_EXTENSIONS
938 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
939 return (1);
940 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
941 return (-1);
942 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
943 return (1);
944 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
945 return (-1);
946 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
947 return (1);
948 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
949 return (-1);
950 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
951 return (1);
952 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
953 return (-1);
954 if (extfilter < PF_EXTFILTER_EI ||
955 !PF_AZERO(&b->ext.addr, AF_INET6)) {
956 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
957 return (1);
958 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
959 return (-1);
960 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
961 return (1);
962 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
963 return (-1);
964 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
965 return (1);
966 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
967 return (-1);
968 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
969 return (1);
970 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
971 return (-1);
972 }
973 #else
974 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
975 return (1);
976 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
977 return (-1);
978 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
979 return (1);
980 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
981 return (-1);
982 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
983 return (1);
984 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
985 return (-1);
986 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
987 return (1);
988 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
989 return (-1);
990 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
991 return (1);
992 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
993 return (-1);
994 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
995 return (1);
996 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
997 return (-1);
998 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
999 return (1);
1000 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
1001 return (-1);
1002 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1003 return (1);
1004 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1005 return (-1);
1006 #endif
1007 break;
1008 #endif /* INET6 */
1009 }
1010
1011 #ifndef NO_APPLE_EXTENSIONS
1012 if (a->app_state && b->app_state) {
1013 if (a->app_state->compare_lan_ext &&
1014 b->app_state->compare_lan_ext) {
1015 diff = (const char *)b->app_state->compare_lan_ext -
1016 (const char *)a->app_state->compare_lan_ext;
1017 if (diff != 0)
1018 return (diff);
1019 diff = a->app_state->compare_lan_ext(a->app_state,
1020 b->app_state);
1021 if (diff != 0)
1022 return (diff);
1023 }
1024 }
1025 #else
1026 if ((diff = a->lan.port - b->lan.port) != 0)
1027 return (diff);
1028 if ((diff = a->ext.port - b->ext.port) != 0)
1029 return (diff);
1030 #endif
1031
1032 return (0);
1033 }
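
/*
 * Editorial note on the comparator above (and its ext_gwy counterpart
 * below): under the Apple extensions, `extfilter' widens the match for
 * UDP according to the state key's proto_variant.  As the comparisons
 * show, at the strictest level (PF_EXTFILTER_APD) both the external
 * address and port participate in the key; at PF_EXTFILTER_AD the
 * external port is ignored; and at PF_EXTFILTER_EI and above the
 * external address is ignored as well, giving endpoint-independent
 * state matching.
 */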
1034
1035 static __inline int
1036 pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b)
1037 {
1038 int diff;
1039 #ifndef NO_APPLE_EXTENSIONS
1040 int extfilter;
1041 #endif
1042
1043 if ((diff = a->proto - b->proto) != 0)
1044 return (diff);
1045
1046 if ((diff = a->af - b->af) != 0)
1047 return (diff);
1048
1049 #ifndef NO_APPLE_EXTENSIONS
1050 extfilter = PF_EXTFILTER_APD;
1051
1052 switch (a->proto) {
1053 case IPPROTO_ICMP:
1054 case IPPROTO_ICMPV6:
1055 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
1056 return (diff);
1057 break;
1058
1059 case IPPROTO_TCP:
1060 if ((diff = a->ext.xport.port - b->ext.xport.port) != 0)
1061 return (diff);
1062 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
1063 return (diff);
1064 break;
1065
1066 case IPPROTO_UDP:
1067 if ((diff = a->proto_variant - b->proto_variant))
1068 return (diff);
1069 extfilter = a->proto_variant;
1070 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
1071 return (diff);
1072 if ((extfilter < PF_EXTFILTER_AD) &&
1073 (diff = a->ext.xport.port - b->ext.xport.port) != 0)
1074 return (diff);
1075 break;
1076
1077 case IPPROTO_GRE:
1078 if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
1079 a->proto_variant == b->proto_variant) {
1080 if (!!(diff = a->gwy.xport.call_id -
1081 b->gwy.xport.call_id))
1082 return (diff);
1083 }
1084 break;
1085
1086 case IPPROTO_ESP:
1087 if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi))
1088 return (diff);
1089 break;
1090
1091 default:
1092 break;
1093 }
1094 #endif
1095
1096 switch (a->af) {
1097 #if INET
1098 case AF_INET:
1099 #ifndef NO_APPLE_EXTENSIONS
1100 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1101 return (1);
1102 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1103 return (-1);
1104 if (extfilter < PF_EXTFILTER_EI) {
1105 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1106 return (1);
1107 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1108 return (-1);
1109 }
1110 #else
1111 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1112 return (1);
1113 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1114 return (-1);
1115 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1116 return (1);
1117 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1118 return (-1);
1119 #endif
1120 break;
1121 #endif /* INET */
1122 #if INET6
1123 case AF_INET6:
1124 #ifndef NO_APPLE_EXTENSIONS
1125 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
1126 return (1);
1127 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
1128 return (-1);
1129 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
1130 return (1);
1131 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
1132 return (-1);
1133 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
1134 return (1);
1135 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
1136 return (-1);
1137 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1138 return (1);
1139 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1140 return (-1);
1141 if (extfilter < PF_EXTFILTER_EI ||
1142 !PF_AZERO(&b->ext.addr, AF_INET6)) {
1143 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
1144 return (1);
1145 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
1146 return (-1);
1147 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
1148 return (1);
1149 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
1150 return (-1);
1151 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
1152 return (1);
1153 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
1154 return (-1);
1155 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1156 return (1);
1157 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1158 return (-1);
1159 }
1160 #else
1161 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
1162 return (1);
1163 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
1164 return (-1);
1165 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
1166 return (1);
1167 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
1168 return (-1);
1169 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
1170 return (1);
1171 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
1172 return (-1);
1173 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
1174 return (1);
1175 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
1176 return (-1);
1177 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
1178 return (1);
1179 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
1180 return (-1);
1181 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
1182 return (1);
1183 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
1184 return (-1);
1185 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1186 return (1);
1187 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1188 return (-1);
1189 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1190 return (1);
1191 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1192 return (-1);
1193 #endif
1194 break;
1195 #endif /* INET6 */
1196 }
1197
1198 #ifndef NO_APPLE_EXTENSIONS
1199 if (a->app_state && b->app_state) {
1200 if (a->app_state->compare_ext_gwy &&
1201 b->app_state->compare_ext_gwy) {
1202 diff = (const char *)b->app_state->compare_ext_gwy -
1203 (const char *)a->app_state->compare_ext_gwy;
1204 if (diff != 0)
1205 return (diff);
1206 diff = a->app_state->compare_ext_gwy(a->app_state,
1207 b->app_state);
1208 if (diff != 0)
1209 return (diff);
1210 }
1211 }
1212 #else
1213 if ((diff = a->ext.port - b->ext.port) != 0)
1214 return (diff);
1215 if ((diff = a->gwy.port - b->gwy.port) != 0)
1216 return (diff);
1217 #endif
1218
1219 return (0);
1220 }
1221
1222 static __inline int
1223 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
1224 {
1225 if (a->id > b->id)
1226 return (1);
1227 if (a->id < b->id)
1228 return (-1);
1229 if (a->creatorid > b->creatorid)
1230 return (1);
1231 if (a->creatorid < b->creatorid)
1232 return (-1);
1233
1234 return (0);
1235 }
1236
1237 #if INET6
1238 void
1239 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
1240 {
1241 switch (af) {
1242 #if INET
1243 case AF_INET:
1244 dst->addr32[0] = src->addr32[0];
1245 break;
1246 #endif /* INET */
1247 case AF_INET6:
1248 dst->addr32[0] = src->addr32[0];
1249 dst->addr32[1] = src->addr32[1];
1250 dst->addr32[2] = src->addr32[2];
1251 dst->addr32[3] = src->addr32[3];
1252 break;
1253 }
1254 }
1255 #endif /* INET6 */
1256
1257 struct pf_state *
1258 pf_find_state_byid(struct pf_state_cmp *key)
1259 {
1260 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1261
1262 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1263 }
1264
1265 static struct pf_state *
1266 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
1267 {
1268 struct pf_state_key *sk = NULL;
1269 struct pf_state *s;
1270
1271 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1272
1273 switch (dir) {
1274 case PF_OUT:
1275 sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
1276 (struct pf_state_key *)key);
1277 break;
1278 case PF_IN:
1279 sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
1280 (struct pf_state_key *)key);
1281 break;
1282 default:
1283 panic("pf_find_state");
1284 }
1285
1286 /* list is sorted, if-bound states before floating ones */
1287 if (sk != NULL)
1288 TAILQ_FOREACH(s, &sk->states, next)
1289 if (s->kif == pfi_all || s->kif == kif)
1290 return (s);
1291
1292 return (NULL);
1293 }
1294
1295 struct pf_state *
1296 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1297 {
1298 struct pf_state_key *sk = NULL;
1299 struct pf_state *s, *ret = NULL;
1300
1301 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1302
1303 switch (dir) {
1304 case PF_OUT:
1305 sk = RB_FIND(pf_state_tree_lan_ext,
1306 &pf_statetbl_lan_ext, (struct pf_state_key *)key);
1307 break;
1308 case PF_IN:
1309 sk = RB_FIND(pf_state_tree_ext_gwy,
1310 &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
1311 break;
1312 default:
1313 panic("pf_find_state_all");
1314 }
1315
1316 if (sk != NULL) {
1317 ret = TAILQ_FIRST(&sk->states);
1318 if (more == NULL)
1319 return (ret);
1320
1321 TAILQ_FOREACH(s, &sk->states, next)
1322 (*more)++;
1323 }
1324
1325 return (ret);
1326 }
1327
1328 static void
1329 pf_init_threshold(struct pf_threshold *threshold,
1330 u_int32_t limit, u_int32_t seconds)
1331 {
1332 threshold->limit = limit * PF_THRESHOLD_MULT;
1333 threshold->seconds = seconds;
1334 threshold->count = 0;
1335 threshold->last = pf_time_second();
1336 }
1337
1338 static void
1339 pf_add_threshold(struct pf_threshold *threshold)
1340 {
1341 u_int32_t t = pf_time_second(), diff = t - threshold->last;
1342
1343 if (diff >= threshold->seconds)
1344 threshold->count = 0;
1345 else
1346 threshold->count -= threshold->count * diff /
1347 threshold->seconds;
1348 threshold->count += PF_THRESHOLD_MULT;
1349 threshold->last = t;
1350 }
1351
1352 static int
1353 pf_check_threshold(struct pf_threshold *threshold)
1354 {
1355 return (threshold->count > threshold->limit);
1356 }
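
/*
 * Worked example (editorial addition) for the threshold helpers above:
 * with a rule specifying max-src-conn-rate 10/5, pf_init_threshold()
 * stores limit = 10 * PF_THRESHOLD_MULT and seconds = 5.  Each tracked
 * connection calls pf_add_threshold(), which first decays the running
 * count in proportion to the elapsed time (count -= count * diff /
 * seconds) and then adds PF_THRESHOLD_MULT, so roughly ten connections
 * inside any five-second window push count past limit and
 * pf_check_threshold() reports the source as over its rate.
 */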
1357
1358 static int
1359 pf_src_connlimit(struct pf_state **state)
1360 {
1361 int bad = 0;
1362
1363 (*state)->src_node->conn++;
1364 VERIFY((*state)->src_node->conn != 0);
1365 (*state)->src.tcp_est = 1;
1366 pf_add_threshold(&(*state)->src_node->conn_rate);
1367
1368 if ((*state)->rule.ptr->max_src_conn &&
1369 (*state)->rule.ptr->max_src_conn <
1370 (*state)->src_node->conn) {
1371 pf_status.lcounters[LCNT_SRCCONN]++;
1372 bad++;
1373 }
1374
1375 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
1376 pf_check_threshold(&(*state)->src_node->conn_rate)) {
1377 pf_status.lcounters[LCNT_SRCCONNRATE]++;
1378 bad++;
1379 }
1380
1381 if (!bad)
1382 return (0);
1383
1384 if ((*state)->rule.ptr->overload_tbl) {
1385 struct pfr_addr p;
1386 u_int32_t killed = 0;
1387
1388 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
1389 if (pf_status.debug >= PF_DEBUG_MISC) {
1390 printf("pf_src_connlimit: blocking address ");
1391 pf_print_host(&(*state)->src_node->addr, 0,
1392 (*state)->state_key->af);
1393 }
1394
1395 bzero(&p, sizeof (p));
1396 p.pfra_af = (*state)->state_key->af;
1397 switch ((*state)->state_key->af) {
1398 #if INET
1399 case AF_INET:
1400 p.pfra_net = 32;
1401 p.pfra_ip4addr = (*state)->src_node->addr.v4;
1402 break;
1403 #endif /* INET */
1404 #if INET6
1405 case AF_INET6:
1406 p.pfra_net = 128;
1407 p.pfra_ip6addr = (*state)->src_node->addr.v6;
1408 break;
1409 #endif /* INET6 */
1410 }
1411
1412 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
1413 &p, pf_time_second());
1414
1415 /* kill existing states if that's required. */
1416 if ((*state)->rule.ptr->flush) {
1417 struct pf_state_key *sk;
1418 struct pf_state *st;
1419
1420 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
1421 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
1422 sk = st->state_key;
1423 /*
1424 * Kill states from this source. (Only those
1425 * from the same rule if PF_FLUSH_GLOBAL is not
1426 * set)
1427 */
1428 if (sk->af ==
1429 (*state)->state_key->af &&
1430 (((*state)->state_key->direction ==
1431 PF_OUT &&
1432 PF_AEQ(&(*state)->src_node->addr,
1433 &sk->lan.addr, sk->af)) ||
1434 ((*state)->state_key->direction == PF_IN &&
1435 PF_AEQ(&(*state)->src_node->addr,
1436 &sk->ext.addr, sk->af))) &&
1437 ((*state)->rule.ptr->flush &
1438 PF_FLUSH_GLOBAL ||
1439 (*state)->rule.ptr == st->rule.ptr)) {
1440 st->timeout = PFTM_PURGE;
1441 st->src.state = st->dst.state =
1442 TCPS_CLOSED;
1443 killed++;
1444 }
1445 }
1446 if (pf_status.debug >= PF_DEBUG_MISC)
1447 printf(", %u states killed", killed);
1448 }
1449 if (pf_status.debug >= PF_DEBUG_MISC)
1450 printf("\n");
1451 }
1452
1453 /* kill this state */
1454 (*state)->timeout = PFTM_PURGE;
1455 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
1456 return (1);
1457 }
1458
1459 int
1460 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
1461 struct pf_addr *src, sa_family_t af)
1462 {
1463 struct pf_src_node k;
1464
1465 if (*sn == NULL) {
1466 k.af = af;
1467 PF_ACPY(&k.addr, src, af);
1468 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
1469 rule->rpool.opts & PF_POOL_STICKYADDR)
1470 k.rule.ptr = rule;
1471 else
1472 k.rule.ptr = NULL;
1473 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
1474 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
1475 }
1476 if (*sn == NULL) {
1477 if (!rule->max_src_nodes ||
1478 rule->src_nodes < rule->max_src_nodes)
1479 (*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK);
1480 else
1481 pf_status.lcounters[LCNT_SRCNODES]++;
1482 if ((*sn) == NULL)
1483 return (-1);
1484 bzero(*sn, sizeof (struct pf_src_node));
1485
1486 pf_init_threshold(&(*sn)->conn_rate,
1487 rule->max_src_conn_rate.limit,
1488 rule->max_src_conn_rate.seconds);
1489
1490 (*sn)->af = af;
1491 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
1492 rule->rpool.opts & PF_POOL_STICKYADDR)
1493 (*sn)->rule.ptr = rule;
1494 else
1495 (*sn)->rule.ptr = NULL;
1496 PF_ACPY(&(*sn)->addr, src, af);
1497 if (RB_INSERT(pf_src_tree,
1498 &tree_src_tracking, *sn) != NULL) {
1499 if (pf_status.debug >= PF_DEBUG_MISC) {
1500 printf("pf: src_tree insert failed: ");
1501 pf_print_host(&(*sn)->addr, 0, af);
1502 printf("\n");
1503 }
1504 pool_put(&pf_src_tree_pl, *sn);
1505 return (-1);
1506 }
1507 (*sn)->creation = pf_time_second();
1508 (*sn)->ruletype = rule->action;
1509 if ((*sn)->rule.ptr != NULL)
1510 (*sn)->rule.ptr->src_nodes++;
1511 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
1512 pf_status.src_nodes++;
1513 } else {
1514 if (rule->max_src_states &&
1515 (*sn)->states >= rule->max_src_states) {
1516 pf_status.lcounters[LCNT_SRCSTATES]++;
1517 return (-1);
1518 }
1519 }
1520 return (0);
1521 }
1522
1523 static void
1524 pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif)
1525 {
1526 struct pf_state_key *sk = s->state_key;
1527
1528 if (pf_status.debug >= PF_DEBUG_MISC) {
1529 #ifndef NO_APPLE_EXTENSIONS
1530 printf("pf: state insert failed: %s %s ", tree, kif->pfik_name);
1531 switch (sk->proto) {
1532 case IPPROTO_TCP:
1533 printf("TCP");
1534 break;
1535 case IPPROTO_UDP:
1536 printf("UDP");
1537 break;
1538 case IPPROTO_ICMP:
1539 printf("ICMP4");
1540 break;
1541 case IPPROTO_ICMPV6:
1542 printf("ICMP6");
1543 break;
1544 default:
1545 printf("PROTO=%u", sk->proto);
1546 break;
1547 }
1548 printf(" lan: ");
1549 pf_print_sk_host(&sk->lan, sk->af, sk->proto,
1550 sk->proto_variant);
1551 printf(" gwy: ");
1552 pf_print_sk_host(&sk->gwy, sk->af, sk->proto,
1553 sk->proto_variant);
1554 printf(" ext: ");
1555 pf_print_sk_host(&sk->ext, sk->af, sk->proto,
1556 sk->proto_variant);
1557 #else
1558 printf("pf: state insert failed: %s %s", tree, kif->pfik_name);
1559 printf(" lan: ");
1560 pf_print_host(&sk->lan.addr, sk->lan.port,
1561 sk->af);
1562 printf(" gwy: ");
1563 pf_print_host(&sk->gwy.addr, sk->gwy.port,
1564 sk->af);
1565 printf(" ext: ");
1566 pf_print_host(&sk->ext.addr, sk->ext.port,
1567 sk->af);
1568 #endif
1569 if (s->sync_flags & PFSTATE_FROMSYNC)
1570 printf(" (from sync)");
1571 printf("\n");
1572 }
1573 }
1574
1575 int
1576 pf_insert_state(struct pfi_kif *kif, struct pf_state *s)
1577 {
1578 struct pf_state_key *cur;
1579 struct pf_state *sp;
1580
1581 VERIFY(s->state_key != NULL);
1582 s->kif = kif;
1583
1584 if ((cur = RB_INSERT(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
1585 s->state_key)) != NULL) {
1586 /* key exists. check for same kif, if none, add to key */
1587 TAILQ_FOREACH(sp, &cur->states, next)
1588 if (sp->kif == kif) { /* collision! */
1589 pf_stateins_err("tree_lan_ext", s, kif);
1590 pf_detach_state(s,
1591 PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
1592 return (-1);
1593 }
1594 pf_detach_state(s, PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
1595 pf_attach_state(cur, s, kif == pfi_all ? 1 : 0);
1596 }
1597
1598 /* if cur != NULL, we already found a state key and attached to it */
1599 if (cur == NULL && (cur = RB_INSERT(pf_state_tree_ext_gwy,
1600 &pf_statetbl_ext_gwy, s->state_key)) != NULL) {
1601 /* must not happen. we must have found the sk above! */
1602 pf_stateins_err("tree_ext_gwy", s, kif);
1603 pf_detach_state(s, PF_DT_SKIP_EXTGWY);
1604 return (-1);
1605 }
1606
1607 if (s->id == 0 && s->creatorid == 0) {
1608 s->id = htobe64(pf_status.stateid++);
1609 s->creatorid = pf_status.hostid;
1610 }
1611 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
1612 if (pf_status.debug >= PF_DEBUG_MISC) {
1613 printf("pf: state insert failed: "
1614 "id: %016llx creatorid: %08x",
1615 be64toh(s->id), ntohl(s->creatorid));
1616 if (s->sync_flags & PFSTATE_FROMSYNC)
1617 printf(" (from sync)");
1618 printf("\n");
1619 }
1620 pf_detach_state(s, 0);
1621 return (-1);
1622 }
1623 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
1624 pf_status.fcounters[FCNT_STATE_INSERT]++;
1625 pf_status.states++;
1626 VERIFY(pf_status.states != 0);
1627 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1628 #if NPFSYNC
1629 pfsync_insert_state(s);
1630 #endif
1631 return (0);
1632 }
1633
1634 void
1635 pf_purge_thread_fn(void *v, wait_result_t w)
1636 {
1637 #pragma unused(v, w)
1638 u_int32_t nloops = 0;
1639 int t = 0;
1640
1641 for (;;) {
1642 (void) tsleep(pf_purge_thread_fn, PWAIT, "pftm", t * hz);
1643
1644 lck_rw_lock_shared(pf_perim_lock);
1645 lck_mtx_lock(pf_lock);
1646
1647 /* purge everything if not running */
1648 if (!pf_status.running) {
1649 pf_purge_expired_states(pf_status.states);
1650 pf_purge_expired_fragments();
1651 pf_purge_expired_src_nodes();
1652
1653 /* terminate thread (we don't currently do this) */
1654 if (pf_purge_thread == NULL) {
1655 lck_mtx_unlock(pf_lock);
1656 lck_rw_done(pf_perim_lock);
1657
1658 thread_deallocate(current_thread());
1659 thread_terminate(current_thread());
1660 /* NOTREACHED */
1661 return;
1662 } else {
1663 /* if there's nothing left, sleep w/o timeout */
1664 if (pf_status.states == 0 &&
1665 pf_normalize_isempty() &&
1666 RB_EMPTY(&tree_src_tracking))
1667 t = 0;
1668
1669 lck_mtx_unlock(pf_lock);
1670 lck_rw_done(pf_perim_lock);
1671 continue;
1672 }
1673 } else if (t == 0) {
1674 /* Set timeout to 1 second */
1675 t = 1;
1676 }
1677
1678 /* process a fraction of the state table every second */
1679 pf_purge_expired_states(1 + (pf_status.states
1680 / pf_default_rule.timeout[PFTM_INTERVAL]));
1681
1682 /* purge other expired types every PFTM_INTERVAL seconds */
1683 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
1684 pf_purge_expired_fragments();
1685 pf_purge_expired_src_nodes();
1686 nloops = 0;
1687 }
1688
1689 lck_mtx_unlock(pf_lock);
1690 lck_rw_done(pf_perim_lock);
1691 }
1692 }
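
/*
 * Editorial note on the purge thread above: with the stock pf default
 * of PFTM_INTERVAL = 10 seconds, each one-second pass checks
 * 1 + (states / 10) entries, so the whole state table is swept roughly
 * once per interval, while fragments and source nodes are only purged
 * every PFTM_INTERVAL iterations.
 */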
1693
1694 u_int64_t
1695 pf_state_expires(const struct pf_state *state)
1696 {
1697 u_int32_t t;
1698 u_int32_t start;
1699 u_int32_t end;
1700 u_int32_t states;
1701
1702 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1703
1704 /* handle all PFTM_* > PFTM_MAX here */
1705 if (state->timeout == PFTM_PURGE)
1706 return (pf_time_second());
1707 if (state->timeout == PFTM_UNTIL_PACKET)
1708 return (0);
1709 VERIFY(state->timeout != PFTM_UNLINKED);
1710 VERIFY(state->timeout < PFTM_MAX);
1711 t = state->rule.ptr->timeout[state->timeout];
1712 if (!t)
1713 t = pf_default_rule.timeout[state->timeout];
1714 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1715 if (start) {
1716 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1717 states = state->rule.ptr->states;
1718 } else {
1719 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1720 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1721 states = pf_status.states;
1722 }
1723 if (end && states > start && start < end) {
1724 if (states < end)
1725 return (state->expire + t * (end - states) /
1726 (end - start));
1727 else
1728 return (pf_time_second());
1729 }
1730 return (state->expire + t);
1731 }
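
/*
 * Worked example (editorial addition): with adaptive.start 6000,
 * adaptive.end 12000 and a base timeout t, a table of 9000 states sits
 * halfway between start and end, so the expression above yields
 * expire + t * (12000 - 9000) / (12000 - 6000), i.e. the timeout is
 * scaled down to half; at or beyond adaptive.end the state is treated
 * as already expired.
 */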
1732
1733 void
1734 pf_purge_expired_src_nodes(void)
1735 {
1736 struct pf_src_node *cur, *next;
1737
1738 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1739
1740 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1741 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1742
1743 if (cur->states <= 0 && cur->expire <= pf_time_second()) {
1744 if (cur->rule.ptr != NULL) {
1745 cur->rule.ptr->src_nodes--;
1746 if (cur->rule.ptr->states <= 0 &&
1747 cur->rule.ptr->max_src_nodes <= 0)
1748 pf_rm_rule(NULL, cur->rule.ptr);
1749 }
1750 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1751 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1752 pf_status.src_nodes--;
1753 pool_put(&pf_src_tree_pl, cur);
1754 }
1755 }
1756 }
1757
1758 void
1759 pf_src_tree_remove_state(struct pf_state *s)
1760 {
1761 u_int32_t t;
1762
1763 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1764
1765 if (s->src_node != NULL) {
1766 if (s->src.tcp_est) {
1767 VERIFY(s->src_node->conn > 0);
1768 --s->src_node->conn;
1769 }
1770 VERIFY(s->src_node->states > 0);
1771 if (--s->src_node->states <= 0) {
1772 t = s->rule.ptr->timeout[PFTM_SRC_NODE];
1773 if (!t)
1774 t = pf_default_rule.timeout[PFTM_SRC_NODE];
1775 s->src_node->expire = pf_time_second() + t;
1776 }
1777 }
1778 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1779 VERIFY(s->nat_src_node->states > 0);
1780 if (--s->nat_src_node->states <= 0) {
1781 t = s->rule.ptr->timeout[PFTM_SRC_NODE];
1782 if (!t)
1783 t = pf_default_rule.timeout[PFTM_SRC_NODE];
1784 s->nat_src_node->expire = pf_time_second() + t;
1785 }
1786 }
1787 s->src_node = s->nat_src_node = NULL;
1788 }
1789
1790 void
1791 pf_unlink_state(struct pf_state *cur)
1792 {
1793 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1794
1795 #ifndef NO_APPLE_EXTENSIONS
1796 if (cur->src.state == PF_TCPS_PROXY_DST) {
1797 pf_send_tcp(cur->rule.ptr, cur->state_key->af,
1798 &cur->state_key->ext.addr, &cur->state_key->lan.addr,
1799 cur->state_key->ext.xport.port,
1800 cur->state_key->lan.xport.port,
1801 cur->src.seqhi, cur->src.seqlo + 1,
1802 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1803 }
1804
1805 hook_runloop(&cur->unlink_hooks, HOOK_REMOVE|HOOK_FREE);
1806 #else
1807 if (cur->src.state == PF_TCPS_PROXY_DST) {
1808 pf_send_tcp(cur->rule.ptr, cur->state_key->af,
1809 &cur->state_key->ext.addr, &cur->state_key->lan.addr,
1810 cur->state_key->ext.port, cur->state_key->lan.port,
1811 cur->src.seqhi, cur->src.seqlo + 1,
1812 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1813 }
1814 #endif
1815 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1816 #if NPFSYNC
1817 if (cur->creatorid == pf_status.hostid)
1818 pfsync_delete_state(cur);
1819 #endif
1820 cur->timeout = PFTM_UNLINKED;
1821 pf_src_tree_remove_state(cur);
1822 pf_detach_state(cur, 0);
1823 }
1824
1825 /* callers should be at splpf and hold the
1826 * write_lock on pf_consistency_lock */
1827 void
1828 pf_free_state(struct pf_state *cur)
1829 {
1830 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1831 #if NPFSYNC
1832 if (pfsyncif != NULL &&
1833 (pfsyncif->sc_bulk_send_next == cur ||
1834 pfsyncif->sc_bulk_terminator == cur))
1835 return;
1836 #endif
1837 VERIFY(cur->timeout == PFTM_UNLINKED);
1838 VERIFY(cur->rule.ptr->states > 0);
1839 if (--cur->rule.ptr->states <= 0 &&
1840 cur->rule.ptr->src_nodes <= 0)
1841 pf_rm_rule(NULL, cur->rule.ptr);
1842 if (cur->nat_rule.ptr != NULL) {
1843 VERIFY(cur->nat_rule.ptr->states > 0);
1844 if (--cur->nat_rule.ptr->states <= 0 &&
1845 cur->nat_rule.ptr->src_nodes <= 0)
1846 pf_rm_rule(NULL, cur->nat_rule.ptr);
1847 }
1848 if (cur->anchor.ptr != NULL) {
1849 VERIFY(cur->anchor.ptr->states > 0);
1850 if (--cur->anchor.ptr->states <= 0)
1851 pf_rm_rule(NULL, cur->anchor.ptr);
1852 }
1853 pf_normalize_tcp_cleanup(cur);
1854 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1855 TAILQ_REMOVE(&state_list, cur, entry_list);
1856 if (cur->tag)
1857 pf_tag_unref(cur->tag);
1858 pool_put(&pf_state_pl, cur);
1859 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1860 VERIFY(pf_status.states > 0);
1861 pf_status.states--;
1862 }
1863
1864 void
1865 pf_purge_expired_states(u_int32_t maxcheck)
1866 {
1867 static struct pf_state *cur = NULL;
1868 struct pf_state *next;
1869
1870 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1871
1872 while (maxcheck--) {
1873 /* wrap to start of list when we hit the end */
1874 if (cur == NULL) {
1875 cur = TAILQ_FIRST(&state_list);
1876 if (cur == NULL)
1877 break; /* list empty */
1878 }
1879
1880 /* get next state, as cur may get deleted */
1881 next = TAILQ_NEXT(cur, entry_list);
1882
1883 if (cur->timeout == PFTM_UNLINKED) {
1884 pf_free_state(cur);
1885 } else if (pf_state_expires(cur) <= pf_time_second()) {
1886 /* unlink and free expired state */
1887 pf_unlink_state(cur);
1888 pf_free_state(cur);
1889 }
1890 cur = next;
1891 }
1892 }
1893
1894 int
1895 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1896 {
1897 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1898
1899 if (aw->type != PF_ADDR_TABLE)
1900 return (0);
1901 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
1902 return (1);
1903 return (0);
1904 }
1905
1906 void
1907 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1908 {
1909 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1910
1911 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1912 return;
1913 pfr_detach_table(aw->p.tbl);
1914 aw->p.tbl = NULL;
1915 }
1916
1917 void
1918 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1919 {
1920 struct pfr_ktable *kt = aw->p.tbl;
1921
1922 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1923
1924 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1925 return;
1926 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1927 kt = kt->pfrkt_root;
1928 aw->p.tbl = NULL;
1929 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1930 kt->pfrkt_cnt : -1;
1931 }
1932
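/*
 * Debug printing helpers: print an address (compressing the longest run
 * of zero 16-bit groups to "::" for IPv6), a state key host together
 * with its protocol-specific identifier (port, ESP SPI or GRE call-id),
 * and a full state entry.
 */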
1933 #ifndef NO_APPLE_EXTENSIONS
1934 static void
1935 pf_print_addr(struct pf_addr *addr, sa_family_t af)
1936 {
1937 switch (af) {
1938 #if INET
1939 case AF_INET: {
1940 u_int32_t a = ntohl(addr->addr32[0]);
1941 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1942 (a>>8)&255, a&255);
1943 break;
1944 }
1945 #endif /* INET */
1946 #if INET6
1947 case AF_INET6: {
1948 u_int16_t b;
1949 u_int8_t i, curstart = 255, curend = 0,
1950 maxstart = 0, maxend = 0;
1951 for (i = 0; i < 8; i++) {
1952 if (!addr->addr16[i]) {
1953 if (curstart == 255)
1954 curstart = i;
1955 else
1956 curend = i;
1957 } else {
1958 if (curstart) {
1959 if ((curend - curstart) >
1960 (maxend - maxstart)) {
1961 maxstart = curstart;
1962 maxend = curend;
1963 curstart = 255;
1964 }
1965 }
1966 }
1967 }
1968 for (i = 0; i < 8; i++) {
1969 if (i >= maxstart && i <= maxend) {
1970 if (maxend != 7) {
1971 if (i == maxstart)
1972 printf(":");
1973 } else {
1974 if (i == maxend)
1975 printf(":");
1976 }
1977 } else {
1978 b = ntohs(addr->addr16[i]);
1979 printf("%x", b);
1980 if (i < 7)
1981 printf(":");
1982 }
1983 }
1984 break;
1985 }
1986 #endif /* INET6 */
1987 }
1988 }
1989
1990 static void
1991 pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto,
1992 u_int8_t proto_variant)
1993 {
1994 pf_print_addr(&sh->addr, af);
1995
1996 switch (proto) {
1997 case IPPROTO_ESP:
1998 if (sh->xport.spi)
1999 printf("[%08x]", ntohl(sh->xport.spi));
2000 break;
2001
2002 case IPPROTO_GRE:
2003 if (proto_variant == PF_GRE_PPTP_VARIANT)
2004 printf("[%u]", ntohs(sh->xport.call_id));
2005 break;
2006
2007 case IPPROTO_TCP:
2008 case IPPROTO_UDP:
2009 printf("[%u]", ntohs(sh->xport.port));
2010 break;
2011
2012 default:
2013 break;
2014 }
2015 }
2016 #endif
2017
2018 static void
2019 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2020 {
2021 #ifndef NO_APPLE_EXTENSIONS
2022 pf_print_addr(addr, af);
2023 if (p)
2024 printf("[%u]", ntohs(p));
2025 #else
2026 switch (af) {
2027 #if INET
2028 case AF_INET: {
2029 u_int32_t a = ntohl(addr->addr32[0]);
2030 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2031 (a>>8)&255, a&255);
2032 if (p) {
2033 p = ntohs(p);
2034 printf(":%u", p);
2035 }
2036 break;
2037 }
2038 #endif /* INET */
2039 #if INET6
2040 case AF_INET6: {
2041 u_int16_t b;
2042 u_int8_t i, curstart = 255, curend = 0,
2043 maxstart = 0, maxend = 0;
2044 for (i = 0; i < 8; i++) {
2045 if (!addr->addr16[i]) {
2046 if (curstart == 255)
2047 curstart = i;
2048 else
2049 curend = i;
2050 } else {
2051 if (curstart) {
2052 if ((curend - curstart) >
2053 (maxend - maxstart)) {
2054 maxstart = curstart;
2055 maxend = curend;
2056 curstart = 255;
2057 }
2058 }
2059 }
2060 }
2061 for (i = 0; i < 8; i++) {
2062 if (i >= maxstart && i <= maxend) {
2063 if (maxend != 7) {
2064 if (i == maxstart)
2065 printf(":");
2066 } else {
2067 if (i == maxend)
2068 printf(":");
2069 }
2070 } else {
2071 b = ntohs(addr->addr16[i]);
2072 printf("%x", b);
2073 if (i < 7)
2074 printf(":");
2075 }
2076 }
2077 if (p) {
2078 p = ntohs(p);
2079 printf("[%u]", p);
2080 }
2081 break;
2082 }
2083 #endif /* INET6 */
2084 }
2085 #endif
2086 }
2087
2088 void
2089 pf_print_state(struct pf_state *s)
2090 {
2091 struct pf_state_key *sk = s->state_key;
2092 switch (sk->proto) {
2093 #ifndef NO_APPLE_EXTENSIONS
2094 case IPPROTO_ESP:
2095 printf("ESP ");
2096 break;
2097 case IPPROTO_GRE:
2098 printf("GRE%u ", sk->proto_variant);
2099 break;
2100 #endif
2101 case IPPROTO_TCP:
2102 printf("TCP ");
2103 break;
2104 case IPPROTO_UDP:
2105 printf("UDP ");
2106 break;
2107 case IPPROTO_ICMP:
2108 printf("ICMP ");
2109 break;
2110 case IPPROTO_ICMPV6:
2111 printf("ICMPV6 ");
2112 break;
2113 default:
2114 printf("%u ", sk->proto);
2115 break;
2116 }
2117 #ifndef NO_APPLE_EXTENSIONS
2118 pf_print_sk_host(&sk->lan, sk->af, sk->proto, sk->proto_variant);
2119 printf(" ");
2120 pf_print_sk_host(&sk->gwy, sk->af, sk->proto, sk->proto_variant);
2121 printf(" ");
2122 pf_print_sk_host(&sk->ext, sk->af, sk->proto, sk->proto_variant);
2123 #else
2124 pf_print_host(&sk->lan.addr, sk->lan.port, sk->af);
2125 printf(" ");
2126 pf_print_host(&sk->gwy.addr, sk->gwy.port, sk->af);
2127 printf(" ");
2128 pf_print_host(&sk->ext.addr, sk->ext.port, sk->af);
2129 #endif
2130 printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
2131 s->src.seqhi, s->src.max_win, s->src.seqdiff);
2132 if (s->src.wscale && s->dst.wscale)
2133 printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
2134 printf("]");
2135 printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
2136 s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
2137 if (s->src.wscale && s->dst.wscale)
2138 printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
2139 printf("]");
2140 printf(" %u:%u", s->src.state, s->dst.state);
2141 }
2142
2143 void
2144 pf_print_flags(u_int8_t f)
2145 {
2146 if (f)
2147 printf(" ");
2148 if (f & TH_FIN)
2149 printf("F");
2150 if (f & TH_SYN)
2151 printf("S");
2152 if (f & TH_RST)
2153 printf("R");
2154 if (f & TH_PUSH)
2155 printf("P");
2156 if (f & TH_ACK)
2157 printf("A");
2158 if (f & TH_URG)
2159 printf("U");
2160 if (f & TH_ECE)
2161 printf("E");
2162 if (f & TH_CWR)
2163 printf("W");
2164 }
2165
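/*
 * Skip steps: for each rule and each PF_SKIP_* field, skip[i] points to
 * the next rule whose value for that field differs.  During evaluation,
 * a mismatch on e.g. the address family lets the evaluator jump past
 * every consecutive rule sharing that value instead of testing each one.
 * PF_SET_SKIP_STEPS(i) back-fills skip[i] for every rule from head[i] up
 * to the first rule ('cur') whose field i differs.
 */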
2166 #define PF_SET_SKIP_STEPS(i) \
2167 do { \
2168 while (head[i] != cur) { \
2169 head[i]->skip[i].ptr = cur; \
2170 head[i] = TAILQ_NEXT(head[i], entries); \
2171 } \
2172 } while (0)
2173
2174 void
2175 pf_calc_skip_steps(struct pf_rulequeue *rules)
2176 {
2177 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
2178 int i;
2179
2180 cur = TAILQ_FIRST(rules);
2181 prev = cur;
2182 for (i = 0; i < PF_SKIP_COUNT; ++i)
2183 head[i] = cur;
2184 while (cur != NULL) {
2185
2186 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
2187 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
2188 if (cur->direction != prev->direction)
2189 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
2190 if (cur->af != prev->af)
2191 PF_SET_SKIP_STEPS(PF_SKIP_AF);
2192 if (cur->proto != prev->proto)
2193 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
2194 if (cur->src.neg != prev->src.neg ||
2195 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
2196 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
2197 #ifndef NO_APPLE_EXTENSIONS
2198 {
2199 union pf_rule_xport *cx = &cur->src.xport;
2200 union pf_rule_xport *px = &prev->src.xport;
2201
2202 switch (cur->proto) {
2203 case IPPROTO_GRE:
2204 case IPPROTO_ESP:
2205 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2206 break;
2207 default:
2208 if (prev->proto == IPPROTO_GRE ||
2209 prev->proto == IPPROTO_ESP ||
2210 cx->range.op != px->range.op ||
2211 cx->range.port[0] != px->range.port[0] ||
2212 cx->range.port[1] != px->range.port[1])
2213 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2214 break;
2215 }
2216 }
2217 #else
2218 if (cur->src.port[0] != prev->src.port[0] ||
2219 cur->src.port[1] != prev->src.port[1] ||
2220 cur->src.port_op != prev->src.port_op)
2221 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2222 #endif
2223 if (cur->dst.neg != prev->dst.neg ||
2224 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
2225 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
2226 #ifndef NO_APPLE_EXTENSIONS
2227 {
2228 union pf_rule_xport *cx = &cur->dst.xport;
2229 union pf_rule_xport *px = &prev->dst.xport;
2230
2231 switch (cur->proto) {
2232 case IPPROTO_GRE:
2233 if (cur->proto != prev->proto ||
2234 cx->call_id != px->call_id)
2235 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2236 break;
2237 case IPPROTO_ESP:
2238 if (cur->proto != prev->proto ||
2239 cx->spi != px->spi)
2240 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2241 break;
2242 default:
2243 if (prev->proto == IPPROTO_GRE ||
2244 prev->proto == IPPROTO_ESP ||
2245 cx->range.op != px->range.op ||
2246 cx->range.port[0] != px->range.port[0] ||
2247 cx->range.port[1] != px->range.port[1])
2248 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2249 break;
2250 }
2251 }
2252 #else
2253 if (cur->dst.port[0] != prev->dst.port[0] ||
2254 cur->dst.port[1] != prev->dst.port[1] ||
2255 cur->dst.port_op != prev->dst.port_op)
2256 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2257 #endif
2258
2259 prev = cur;
2260 cur = TAILQ_NEXT(cur, entries);
2261 }
2262 for (i = 0; i < PF_SKIP_COUNT; ++i)
2263 PF_SET_SKIP_STEPS(i);
2264 }
2265
2266 static int
2267 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2268 {
2269 if (aw1->type != aw2->type)
2270 return (1);
2271 switch (aw1->type) {
2272 case PF_ADDR_ADDRMASK:
2273 case PF_ADDR_RANGE:
2274 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
2275 return (1);
2276 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
2277 return (1);
2278 return (0);
2279 case PF_ADDR_DYNIFTL:
2280 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2281 case PF_ADDR_NOROUTE:
2282 case PF_ADDR_URPFFAILED:
2283 return (0);
2284 case PF_ADDR_TABLE:
2285 return (aw1->p.tbl != aw2->p.tbl);
2286 case PF_ADDR_RTLABEL:
2287 return (aw1->v.rtlabel != aw2->v.rtlabel);
2288 default:
2289 printf("invalid address type: %d\n", aw1->type);
2290 return (1);
2291 }
2292 }
2293
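/*
 * Incrementally fix up a 16-bit ones-complement Internet checksum when a
 * single 16-bit word of the covered data changes from 'old' to 'new':
 * the difference is added to the stored checksum and any carry is folded
 * back in.  For example, if the stored checksum is 0x1234 and a word
 * changes from 0x0001 to 0x0002, the updated checksum is
 * 0x1234 + 0x0001 - 0x0002 = 0x1233.  A typical call is, e.g.,
 * th->th_sum = pf_cksum_fixup(th->th_sum, old16, new16, 0).  The 'udp'
 * flag preserves the UDP conventions that a checksum of zero means
 * "no checksum" and that a computed value of zero is sent as 0xffff.
 */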
2294 u_int16_t
2295 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2296 {
2297 u_int32_t l;
2298
2299 if (udp && !cksum)
2300 return (0);
2301 l = cksum + old - new;
2302 l = (l >> 16) + (l & 0xffff);
2303 l = l & 0xffff;
2304 if (udp && !l)
2305 return (0xffff);
2306 return (l);
2307 }
2308
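/*
 * Rewrite an address/port pair (for NAT/RDR) and incrementally repair
 * the affected checksums: the IP header checksum for the address words
 * (IPv4 only) and the TCP/UDP pseudo-header checksum for both the
 * address words and the port.
 */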
2309 static void
2310 pf_change_ap(int dir, struct mbuf *m, struct pf_addr *a, u_int16_t *p,
2311 u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn,
2312 u_int8_t u, sa_family_t af)
2313 {
2314 struct pf_addr ao;
2315 u_int16_t po = *p;
2316
2317 PF_ACPY(&ao, a, af);
2318 PF_ACPY(a, an, af);
2319
2320 *p = pn;
2321
2322 switch (af) {
2323 #if INET
2324 case AF_INET:
2325 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2326 ao.addr16[0], an->addr16[0], 0),
2327 ao.addr16[1], an->addr16[1], 0);
2328 *p = pn;
2329 /*
2330 * If the packet originates from an ALG on the NAT gateway
2331 * (source address is loopback or local), the TCP/UDP checksum
2332 * field contains the pseudo-header checksum, which has not yet
2333 * been complemented.
2334 */
2335 if (dir == PF_OUT && m != NULL &&
2336 (m->m_flags & M_PKTHDR) &&
2337 (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) {
2338 /* Pseudo-header checksum does not include ports */
2339 *pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc,
2340 ao.addr16[0], an->addr16[0], u),
2341 ao.addr16[1], an->addr16[1], u);
2342 } else {
2343 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2344 ao.addr16[0], an->addr16[0], u),
2345 ao.addr16[1], an->addr16[1], u),
2346 po, pn, u);
2347 }
2348 break;
2349 #endif /* INET */
2350 #if INET6
2351 case AF_INET6:
2352 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2353 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2354 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2355 ao.addr16[0], an->addr16[0], u),
2356 ao.addr16[1], an->addr16[1], u),
2357 ao.addr16[2], an->addr16[2], u),
2358 ao.addr16[3], an->addr16[3], u),
2359 ao.addr16[4], an->addr16[4], u),
2360 ao.addr16[5], an->addr16[5], u),
2361 ao.addr16[6], an->addr16[6], u),
2362 ao.addr16[7], an->addr16[7], u),
2363 po, pn, u);
2364 break;
2365 #endif /* INET6 */
2366 }
2367 }
2368
2369
2370 /* Changes a u_int32_t. Uses a void * so there are no alignment restrictions */
2371 void
2372 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2373 {
2374 u_int32_t ao;
2375
2376 memcpy(&ao, a, sizeof (ao));
2377 memcpy(a, &an, sizeof (u_int32_t));
2378 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2379 ao % 65536, an % 65536, u);
2380 }
2381
2382 #if INET6
2383 static void
2384 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2385 {
2386 struct pf_addr ao;
2387
2388 PF_ACPY(&ao, a, AF_INET6);
2389 PF_ACPY(a, an, AF_INET6);
2390
2391 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2392 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2393 pf_cksum_fixup(pf_cksum_fixup(*c,
2394 ao.addr16[0], an->addr16[0], u),
2395 ao.addr16[1], an->addr16[1], u),
2396 ao.addr16[2], an->addr16[2], u),
2397 ao.addr16[3], an->addr16[3], u),
2398 ao.addr16[4], an->addr16[4], u),
2399 ao.addr16[5], an->addr16[5], u),
2400 ao.addr16[6], an->addr16[6], u),
2401 ao.addr16[7], an->addr16[7], u);
2402 }
2403 #endif /* INET6 */
2404
2405 static void
2406 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2407 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2408 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2409 {
2410 struct pf_addr oia, ooa;
2411
2412 PF_ACPY(&oia, ia, af);
2413 PF_ACPY(&ooa, oa, af);
2414
2415 /* Change inner protocol port, fix inner protocol checksum. */
2416 if (ip != NULL) {
2417 u_int16_t oip = *ip;
2418 u_int32_t opc = 0;
2419
2420 if (pc != NULL)
2421 opc = *pc;
2422 *ip = np;
2423 if (pc != NULL)
2424 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2425 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2426 if (pc != NULL)
2427 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2428 }
2429 /* Change inner ip address, fix inner ip and icmp checksums. */
2430 PF_ACPY(ia, na, af);
2431 switch (af) {
2432 #if INET
2433 case AF_INET: {
2434 u_int32_t oh2c = *h2c;
2435
2436 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2437 oia.addr16[0], ia->addr16[0], 0),
2438 oia.addr16[1], ia->addr16[1], 0);
2439 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2440 oia.addr16[0], ia->addr16[0], 0),
2441 oia.addr16[1], ia->addr16[1], 0);
2442 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2443 break;
2444 }
2445 #endif /* INET */
2446 #if INET6
2447 case AF_INET6:
2448 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2449 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2450 pf_cksum_fixup(pf_cksum_fixup(*ic,
2451 oia.addr16[0], ia->addr16[0], u),
2452 oia.addr16[1], ia->addr16[1], u),
2453 oia.addr16[2], ia->addr16[2], u),
2454 oia.addr16[3], ia->addr16[3], u),
2455 oia.addr16[4], ia->addr16[4], u),
2456 oia.addr16[5], ia->addr16[5], u),
2457 oia.addr16[6], ia->addr16[6], u),
2458 oia.addr16[7], ia->addr16[7], u);
2459 break;
2460 #endif /* INET6 */
2461 }
2462 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
2463 PF_ACPY(oa, na, af);
2464 switch (af) {
2465 #if INET
2466 case AF_INET:
2467 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2468 ooa.addr16[0], oa->addr16[0], 0),
2469 ooa.addr16[1], oa->addr16[1], 0);
2470 break;
2471 #endif /* INET */
2472 #if INET6
2473 case AF_INET6:
2474 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2475 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2476 pf_cksum_fixup(pf_cksum_fixup(*ic,
2477 ooa.addr16[0], oa->addr16[0], u),
2478 ooa.addr16[1], oa->addr16[1], u),
2479 ooa.addr16[2], oa->addr16[2], u),
2480 ooa.addr16[3], oa->addr16[3], u),
2481 ooa.addr16[4], oa->addr16[4], u),
2482 ooa.addr16[5], oa->addr16[5], u),
2483 ooa.addr16[6], oa->addr16[6], u),
2484 ooa.addr16[7], oa->addr16[7], u);
2485 break;
2486 #endif /* INET6 */
2487 }
2488 }
2489
2490
2491 /*
2492 * Need to modulate the sequence numbers in the TCP SACK option
2493 * (credits to Krzysztof Pfaff for report and patch)
2494 */
2495 static int
2496 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2497 struct tcphdr *th, struct pf_state_peer *dst)
2498 {
2499 int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen;
2500 u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
2501 int copyback = 0, i, olen;
2502 struct sackblk sack;
2503
2504 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2505 if (hlen < TCPOLEN_SACKLEN ||
2506 !pf_pull_hdr(m, off + sizeof (*th), opts, hlen, NULL, NULL, pd->af))
2507 return (0);
2508
2509 while (hlen >= TCPOLEN_SACKLEN) {
2510 olen = opt[1];
2511 switch (*opt) {
2512 case TCPOPT_EOL: /* FALLTHROUGH */
2513 case TCPOPT_NOP:
2514 opt++;
2515 hlen--;
2516 break;
2517 case TCPOPT_SACK:
2518 if (olen > hlen)
2519 olen = hlen;
2520 if (olen >= TCPOLEN_SACKLEN) {
2521 for (i = 2; i + TCPOLEN_SACK <= olen;
2522 i += TCPOLEN_SACK) {
2523 memcpy(&sack, &opt[i], sizeof (sack));
2524 pf_change_a(&sack.start, &th->th_sum,
2525 htonl(ntohl(sack.start) -
2526 dst->seqdiff), 0);
2527 pf_change_a(&sack.end, &th->th_sum,
2528 htonl(ntohl(sack.end) -
2529 dst->seqdiff), 0);
2530 memcpy(&opt[i], &sack, sizeof (sack));
2531 }
2532 #ifndef NO_APPLE_EXTENSIONS
2533 copyback = off + sizeof (*th) + thoptlen;
2534 #else
2535 copyback = 1;
2536 #endif
2537 }
2538 /* FALLTHROUGH */
2539 default:
2540 if (olen < 2)
2541 olen = 2;
2542 hlen -= olen;
2543 opt += olen;
2544 }
2545 }
2546
2547 #ifndef NO_APPLE_EXTENSIONS
2548 if (copyback) {
2549 m = pf_lazy_makewritable(pd, m, copyback);
2550 if (!m)
2551 return (-1);
2552 m_copyback(m, off + sizeof (*th), thoptlen, opts);
2553 }
2554 #else
2555 if (copyback)
2556 m_copyback(m, off + sizeof (*th), thoptlen, opts);
2557 #endif
2558 return (copyback);
2559 }
2560
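/*
 * Build and transmit a minimal TCP segment (optionally carrying an MSS
 * option) from scratch, e.g. the RST+ACK that pf_unlink_state() sends
 * when it tears down a state still in PF_TCPS_PROXY_DST.
 */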
2561 static void
2562 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2563 const struct pf_addr *saddr, const struct pf_addr *daddr,
2564 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2565 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2566 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2567 {
2568 #pragma unused(eh, ifp)
2569 struct mbuf *m;
2570 int len, tlen;
2571 #if INET
2572 struct ip *h = NULL;
2573 #endif /* INET */
2574 #if INET6
2575 struct ip6_hdr *h6 = NULL;
2576 #endif /* INET6 */
2577 struct tcphdr *th = NULL;
2578 char *opt;
2579 struct pf_mtag *pf_mtag;
2580
2581 /* maximum segment size tcp option */
2582 tlen = sizeof (struct tcphdr);
2583 if (mss)
2584 tlen += 4;
2585
2586 switch (af) {
2587 #if INET
2588 case AF_INET:
2589 len = sizeof (struct ip) + tlen;
2590 break;
2591 #endif /* INET */
2592 #if INET6
2593 case AF_INET6:
2594 len = sizeof (struct ip6_hdr) + tlen;
2595 break;
2596 #endif /* INET6 */
2597 default:
2598 panic("pf_send_tcp: not AF_INET or AF_INET6!");
2599 return;
2600 }
2601
2602 /* create outgoing mbuf */
2603 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2604 if (m == NULL)
2605 return;
2606
2607 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2608 m_free(m);
2609 return;
2610 }
2611
2612 if (tag)
2613 pf_mtag->flags |= PF_TAG_GENERATED;
2614 pf_mtag->tag = rtag;
2615
2616 if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid))
2617 pf_mtag->rtableid = r->rtableid;
2618
2619 #if ALTQ
2620 if (r != NULL && r->qid) {
2621 pf_mtag->qid = r->qid;
2622 /* add hints for ecn */
2623 pf_mtag->hdr = mtod(m, struct ip *);
2624 }
2625 #endif /* ALTQ */
2626 m->m_data += max_linkhdr;
2627 m->m_pkthdr.len = m->m_len = len;
2628 m->m_pkthdr.rcvif = NULL;
2629 bzero(m->m_data, len);
2630 switch (af) {
2631 #if INET
2632 case AF_INET:
2633 h = mtod(m, struct ip *);
2634
2635 /* IP header fields included in the TCP checksum */
2636 h->ip_p = IPPROTO_TCP;
2637 h->ip_len = htons(tlen);
2638 h->ip_src.s_addr = saddr->v4.s_addr;
2639 h->ip_dst.s_addr = daddr->v4.s_addr;
2640
2641 th = (struct tcphdr *)((caddr_t)h + sizeof (struct ip));
2642 break;
2643 #endif /* INET */
2644 #if INET6
2645 case AF_INET6:
2646 h6 = mtod(m, struct ip6_hdr *);
2647
2648 /* IP header fields included in the TCP checksum */
2649 h6->ip6_nxt = IPPROTO_TCP;
2650 h6->ip6_plen = htons(tlen);
2651 memcpy(&h6->ip6_src, &saddr->v6, sizeof (struct in6_addr));
2652 memcpy(&h6->ip6_dst, &daddr->v6, sizeof (struct in6_addr));
2653
2654 th = (struct tcphdr *)((caddr_t)h6 + sizeof (struct ip6_hdr));
2655 break;
2656 #endif /* INET6 */
2657 }
2658
2659 /* TCP header */
2660 th->th_sport = sport;
2661 th->th_dport = dport;
2662 th->th_seq = htonl(seq);
2663 th->th_ack = htonl(ack);
2664 th->th_off = tlen >> 2;
2665 th->th_flags = flags;
2666 th->th_win = htons(win);
2667
2668 if (mss) {
2669 opt = (char *)(th + 1);
2670 opt[0] = TCPOPT_MAXSEG;
2671 opt[1] = 4;
2672 #if BYTE_ORDER != BIG_ENDIAN
2673 HTONS(mss);
2674 #endif
2675 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2676 }
2677
2678 switch (af) {
2679 #if INET
2680 case AF_INET: {
2681 struct route ro;
2682
2683 /* TCP checksum */
2684 th->th_sum = in_cksum(m, len);
2685
2686 /* Finish the IP header */
2687 h->ip_v = 4;
2688 h->ip_hl = sizeof (*h) >> 2;
2689 h->ip_tos = IPTOS_LOWDELAY;
2690 /*
2691 * ip_output() expects ip_len and ip_off to be in host order.
2692 */
2693 h->ip_len = len;
2694 h->ip_off = (path_mtu_discovery ? IP_DF : 0);
2695 h->ip_ttl = ttl ? ttl : ip_defttl;
2696 h->ip_sum = 0;
2697
2698 bzero(&ro, sizeof (ro));
2699 ip_output(m, NULL, &ro, 0, NULL, NULL);
2700 if (ro.ro_rt != NULL)
2701 rtfree(ro.ro_rt);
2702 break;
2703 }
2704 #endif /* INET */
2705 #if INET6
2706 case AF_INET6: {
2707 struct route_in6 ro6;
2708
2709 /* TCP checksum */
2710 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2711 sizeof (struct ip6_hdr), tlen);
2712
2713 h6->ip6_vfc |= IPV6_VERSION;
2714 h6->ip6_hlim = IPV6_DEFHLIM;
2715
2716 bzero(&ro6, sizeof (ro6));
2717 ip6_output(m, NULL, &ro6, 0, NULL, NULL, 0);
2718 if (ro6.ro_rt != NULL)
2719 rtfree(ro6.ro_rt);
2720 break;
2721 }
2722 #endif /* INET6 */
2723 }
2724 }
2725
2726 static void
2727 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2728 struct pf_rule *r)
2729 {
2730 struct mbuf *m0;
2731 struct pf_mtag *pf_mtag;
2732
2733 m0 = m_copy(m, 0, M_COPYALL);
2734 if (m0 == NULL)
2735 return;
2736
2737 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2738 return;
2739
2740 pf_mtag->flags |= PF_TAG_GENERATED;
2741
2742 if (PF_RTABLEID_IS_VALID(r->rtableid))
2743 pf_mtag->rtableid = r->rtableid;
2744
2745 #if ALTQ
2746 if (r->qid) {
2747 pf_mtag->qid = r->qid;
2748 /* add hints for ecn */
2749 pf_mtag->hdr = mtod(m0, struct ip *);
2750 }
2751 #endif /* ALTQ */
2752 switch (af) {
2753 #if INET
2754 case AF_INET:
2755 icmp_error(m0, type, code, 0, 0);
2756 break;
2757 #endif /* INET */
2758 #if INET6
2759 case AF_INET6:
2760 icmp6_error(m0, type, code, 0);
2761 break;
2762 #endif /* INET6 */
2763 }
2764 }
2765
2766 /*
2767 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2768 * If n is 0, they match if they are equal. If n is nonzero, they
2769 * match if they are different.
2770 */
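/*
 * For example, with af == AF_INET, a == 10.0.0.5, m == 255.255.255.0 and
 * b == 10.0.0.0, the masked addresses are equal, so the result is 1 when
 * n == 0 and 0 when n != 0.
 */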
2771 int
2772 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2773 struct pf_addr *b, sa_family_t af)
2774 {
2775 int match = 0;
2776
2777 switch (af) {
2778 #if INET
2779 case AF_INET:
2780 if ((a->addr32[0] & m->addr32[0]) ==
2781 (b->addr32[0] & m->addr32[0]))
2782 match++;
2783 break;
2784 #endif /* INET */
2785 #if INET6
2786 case AF_INET6:
2787 if (((a->addr32[0] & m->addr32[0]) ==
2788 (b->addr32[0] & m->addr32[0])) &&
2789 ((a->addr32[1] & m->addr32[1]) ==
2790 (b->addr32[1] & m->addr32[1])) &&
2791 ((a->addr32[2] & m->addr32[2]) ==
2792 (b->addr32[2] & m->addr32[2])) &&
2793 ((a->addr32[3] & m->addr32[3]) ==
2794 (b->addr32[3] & m->addr32[3])))
2795 match++;
2796 break;
2797 #endif /* INET6 */
2798 }
2799 if (match) {
2800 if (n)
2801 return (0);
2802 else
2803 return (1);
2804 } else {
2805 if (n)
2806 return (1);
2807 else
2808 return (0);
2809 }
2810 }
2811
2812 /*
2813 * Return 1 if b <= a <= e, otherwise return 0.
2814 */
2815 int
2816 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2817 struct pf_addr *a, sa_family_t af)
2818 {
2819 switch (af) {
2820 #if INET
2821 case AF_INET:
2822 if ((a->addr32[0] < b->addr32[0]) ||
2823 (a->addr32[0] > e->addr32[0]))
2824 return (0);
2825 break;
2826 #endif /* INET */
2827 #if INET6
2828 case AF_INET6: {
2829 int i;
2830
2831 /* check a >= b */
2832 for (i = 0; i < 4; ++i)
2833 if (a->addr32[i] > b->addr32[i])
2834 break;
2835 else if (a->addr32[i] < b->addr32[i])
2836 return (0);
2837 /* check a <= e */
2838 for (i = 0; i < 4; ++i)
2839 if (a->addr32[i] < e->addr32[i])
2840 break;
2841 else if (a->addr32[i] > e->addr32[i])
2842 return (0);
2843 break;
2844 }
2845 #endif /* INET6 */
2846 }
2847 return (1);
2848 }
2849
2850 int
2851 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2852 {
2853 switch (op) {
2854 case PF_OP_IRG:
2855 return ((p > a1) && (p < a2));
2856 case PF_OP_XRG:
2857 return ((p < a1) || (p > a2));
2858 case PF_OP_RRG:
2859 return ((p >= a1) && (p <= a2));
2860 case PF_OP_EQ:
2861 return (p == a1);
2862 case PF_OP_NE:
2863 return (p != a1);
2864 case PF_OP_LT:
2865 return (p < a1);
2866 case PF_OP_LE:
2867 return (p <= a1);
2868 case PF_OP_GT:
2869 return (p > a1);
2870 case PF_OP_GE:
2871 return (p >= a1);
2872 }
2873 return (0); /* never reached */
2874 }
2875
2876 int
2877 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2878 {
2879 #if BYTE_ORDER != BIG_ENDIAN
2880 NTOHS(a1);
2881 NTOHS(a2);
2882 NTOHS(p);
2883 #endif
2884 return (pf_match(op, a1, a2, p));
2885 }
2886
2887 #ifndef NO_APPLE_EXTENSIONS
2888 int
2889 pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
2890 union pf_state_xport *sx)
2891 {
2892 int d = !0;
2893
2894 if (sx) {
2895 switch (proto) {
2896 case IPPROTO_GRE:
2897 if (proto_variant == PF_GRE_PPTP_VARIANT)
2898 d = (rx->call_id == sx->call_id);
2899 break;
2900
2901 case IPPROTO_ESP:
2902 d = (rx->spi == sx->spi);
2903 break;
2904
2905 case IPPROTO_TCP:
2906 case IPPROTO_UDP:
2907 case IPPROTO_ICMP:
2908 case IPPROTO_ICMPV6:
2909 if (rx->range.op)
2910 d = pf_match_port(rx->range.op,
2911 rx->range.port[0], rx->range.port[1],
2912 sx->port);
2913 break;
2914
2915 default:
2916 break;
2917 }
2918 }
2919
2920 return (d);
2921 }
2922 #endif
2923
2924 int
2925 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2926 {
2927 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2928 return (0);
2929 return (pf_match(op, a1, a2, u));
2930 }
2931
2932 int
2933 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2934 {
2935 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2936 return (0);
2937 return (pf_match(op, a1, a2, g));
2938 }
2939
2940 static int
2941 pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_mtag *pf_mtag,
2942 int *tag)
2943 {
2944 #pragma unused(m)
2945 if (*tag == -1)
2946 *tag = pf_mtag->tag;
2947
2948 return ((!r->match_tag_not && r->match_tag == *tag) ||
2949 (r->match_tag_not && r->match_tag != *tag));
2950 }
2951
2952 int
2953 pf_tag_packet(struct mbuf *m, struct pf_mtag *pf_mtag, int tag,
2954 unsigned int rtableid)
2955 {
2956 if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid))
2957 return (0);
2958
2959 if (pf_mtag == NULL && (pf_mtag = pf_get_mtag(m)) == NULL)
2960 return (1);
2961
2962 if (tag > 0)
2963 pf_mtag->tag = tag;
2964 if (PF_RTABLEID_IS_VALID(rtableid))
2965 pf_mtag->rtableid = rtableid;
2966
2967 return (0);
2968 }
2969
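/*
 * Anchor traversal.  Rulesets can contain anchors (named sub-rulesets);
 * instead of recursing, evaluation keeps an explicit stack of frames in
 * pf_anchor_stack.  pf_step_into_anchor() pushes a frame and descends
 * into the anchor's ruleset (iterating over all children for wildcard
 * anchors), while pf_step_out_of_anchor() pops frames, propagates the
 * match flag and honours 'quick' on the anchor rule.
 */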
2970 static void
2971 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2972 struct pf_rule **r, struct pf_rule **a, int *match)
2973 {
2974 struct pf_anchor_stackframe *f;
2975
2976 (*r)->anchor->match = 0;
2977 if (match)
2978 *match = 0;
2979 if (*depth >= (int)sizeof (pf_anchor_stack) /
2980 (int)sizeof (pf_anchor_stack[0])) {
2981 printf("pf_step_into_anchor: stack overflow\n");
2982 *r = TAILQ_NEXT(*r, entries);
2983 return;
2984 } else if (*depth == 0 && a != NULL)
2985 *a = *r;
2986 f = pf_anchor_stack + (*depth)++;
2987 f->rs = *rs;
2988 f->r = *r;
2989 if ((*r)->anchor_wildcard) {
2990 f->parent = &(*r)->anchor->children;
2991 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2992 NULL) {
2993 *r = NULL;
2994 return;
2995 }
2996 *rs = &f->child->ruleset;
2997 } else {
2998 f->parent = NULL;
2999 f->child = NULL;
3000 *rs = &(*r)->anchor->ruleset;
3001 }
3002 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3003 }
3004
3005 static int
3006 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
3007 struct pf_rule **r, struct pf_rule **a, int *match)
3008 {
3009 struct pf_anchor_stackframe *f;
3010 int quick = 0;
3011
3012 do {
3013 if (*depth <= 0)
3014 break;
3015 f = pf_anchor_stack + *depth - 1;
3016 if (f->parent != NULL && f->child != NULL) {
3017 if (f->child->match ||
3018 (match != NULL && *match)) {
3019 f->r->anchor->match = 1;
3020 *match = 0;
3021 }
3022 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
3023 if (f->child != NULL) {
3024 *rs = &f->child->ruleset;
3025 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3026 if (*r == NULL)
3027 continue;
3028 else
3029 break;
3030 }
3031 }
3032 (*depth)--;
3033 if (*depth == 0 && a != NULL)
3034 *a = NULL;
3035 *rs = f->rs;
3036 if (f->r->anchor->match || (match != NULL && *match))
3037 quick = f->r->quick;
3038 *r = TAILQ_NEXT(f->r, entries);
3039 } while (*r == NULL);
3040
3041 return (quick);
3042 }
3043
3044 #if INET6
3045 void
3046 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3047 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3048 {
3049 switch (af) {
3050 #if INET
3051 case AF_INET:
3052 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3053 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3054 break;
3055 #endif /* INET */
3056 case AF_INET6:
3057 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3058 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3059 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3060 ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
3061 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3062 ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
3063 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3064 ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
3065 break;
3066 }
3067 }
3068
3069 void
3070 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3071 {
3072 switch (af) {
3073 #if INET
3074 case AF_INET:
3075 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3076 break;
3077 #endif /* INET */
3078 case AF_INET6:
3079 if (addr->addr32[3] == 0xffffffff) {
3080 addr->addr32[3] = 0;
3081 if (addr->addr32[2] == 0xffffffff) {
3082 addr->addr32[2] = 0;
3083 if (addr->addr32[1] == 0xffffffff) {
3084 addr->addr32[1] = 0;
3085 addr->addr32[0] =
3086 htonl(ntohl(addr->addr32[0]) + 1);
3087 } else
3088 addr->addr32[1] =
3089 htonl(ntohl(addr->addr32[1]) + 1);
3090 } else
3091 addr->addr32[2] =
3092 htonl(ntohl(addr->addr32[2]) + 1);
3093 } else
3094 addr->addr32[3] =
3095 htonl(ntohl(addr->addr32[3]) + 1);
3096 break;
3097 }
3098 }
3099 #endif /* INET6 */
3100
3101 #define mix(a, b, c) \
3102 do { \
3103 a -= b; a -= c; a ^= (c >> 13); \
3104 b -= c; b -= a; b ^= (a << 8); \
3105 c -= a; c -= b; c ^= (b >> 13); \
3106 a -= b; a -= c; a ^= (c >> 12); \
3107 b -= c; b -= a; b ^= (a << 16); \
3108 c -= a; c -= b; c ^= (b >> 5); \
3109 a -= b; a -= c; a ^= (c >> 3); \
3110 b -= c; b -= a; b ^= (a << 10); \
3111 c -= a; c -= b; c ^= (b >> 15); \
3112 } while (0)
3113
3114 /*
3115 * hash function based on bridge_hash in if_bridge.c
3116 */
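/*
 * Mixes the source address with the pool hash key to produce a
 * deterministic per-source value; PF_POOL_SRCHASH masks this value into
 * the pool address range so that a given source always maps to the same
 * pool member.
 */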
3117 static void
3118 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
3119 struct pf_poolhashkey *key, sa_family_t af)
3120 {
3121 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
3122
3123 switch (af) {
3124 #if INET
3125 case AF_INET:
3126 a += inaddr->addr32[0];
3127 b += key->key32[1];
3128 mix(a, b, c);
3129 hash->addr32[0] = c + key->key32[2];
3130 break;
3131 #endif /* INET */
3132 #if INET6
3133 case AF_INET6:
3134 a += inaddr->addr32[0];
3135 b += inaddr->addr32[2];
3136 mix(a, b, c);
3137 hash->addr32[0] = c;
3138 a += inaddr->addr32[1];
3139 b += inaddr->addr32[3];
3140 c += key->key32[1];
3141 mix(a, b, c);
3142 hash->addr32[1] = c;
3143 a += inaddr->addr32[2];
3144 b += inaddr->addr32[1];
3145 c += key->key32[2];
3146 mix(a, b, c);
3147 hash->addr32[2] = c;
3148 a += inaddr->addr32[3];
3149 b += inaddr->addr32[0];
3150 c += key->key32[3];
3151 mix(a, b, c);
3152 hash->addr32[3] = c;
3153 break;
3154 #endif /* INET6 */
3155 }
3156 }
3157
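/*
 * Select the translation address from the rule's pool according to the
 * pool type: PF_POOL_NONE uses the pool address as is, PF_POOL_BITMASK
 * combines it with the source address under the pool mask,
 * PF_POOL_RANDOM picks a random address within the pool mask,
 * PF_POOL_ROUNDROBIN cycles through the pool entries, and
 * PF_POOL_SRCHASH derives the address from a hash of the source address.
 * With PF_POOL_STICKYADDR the source-tracking node is consulted first so
 * that a given source keeps its previously selected address.
 */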
3158 static int
3159 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
3160 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
3161 {
3162 unsigned char hash[16];
3163 struct pf_pool *rpool = &r->rpool;
3164 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
3165 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
3166 struct pf_pooladdr *acur = rpool->cur;
3167 struct pf_src_node k;
3168
3169 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
3170 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
3171 k.af = af;
3172 PF_ACPY(&k.addr, saddr, af);
3173 if (r->rule_flag & PFRULE_RULESRCTRACK ||
3174 r->rpool.opts & PF_POOL_STICKYADDR)
3175 k.rule.ptr = r;
3176 else
3177 k.rule.ptr = NULL;
3178 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
3179 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
3180 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
3181 PF_ACPY(naddr, &(*sn)->raddr, af);
3182 if (pf_status.debug >= PF_DEBUG_MISC) {
3183 printf("pf_map_addr: src tracking maps ");
3184 pf_print_host(&k.addr, 0, af);
3185 printf(" to ");
3186 pf_print_host(naddr, 0, af);
3187 printf("\n");
3188 }
3189 return (0);
3190 }
3191 }
3192
3193 if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
3194 return (1);
3195 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3196 switch (af) {
3197 #if INET
3198 case AF_INET:
3199 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
3200 (rpool->opts & PF_POOL_TYPEMASK) !=
3201 PF_POOL_ROUNDROBIN)
3202 return (1);
3203 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
3204 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
3205 break;
3206 #endif /* INET */
3207 #if INET6
3208 case AF_INET6:
3209 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
3210 (rpool->opts & PF_POOL_TYPEMASK) !=
3211 PF_POOL_ROUNDROBIN)
3212 return (1);
3213 raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
3214 rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
3215 break;
3216 #endif /* INET6 */
3217 }
3218 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3219 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
3220 return (1); /* unsupported */
3221 } else {
3222 raddr = &rpool->cur->addr.v.a.addr;
3223 rmask = &rpool->cur->addr.v.a.mask;
3224 }
3225
3226 switch (rpool->opts & PF_POOL_TYPEMASK) {
3227 case PF_POOL_NONE:
3228 PF_ACPY(naddr, raddr, af);
3229 break;
3230 case PF_POOL_BITMASK:
3231 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
3232 break;
3233 case PF_POOL_RANDOM:
3234 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
3235 switch (af) {
3236 #if INET
3237 case AF_INET:
3238 rpool->counter.addr32[0] = htonl(random());
3239 break;
3240 #endif /* INET */
3241 #if INET6
3242 case AF_INET6:
3243 if (rmask->addr32[3] != 0xffffffff)
3244 rpool->counter.addr32[3] =
3245 htonl(random());
3246 else
3247 break;
3248 if (rmask->addr32[2] != 0xffffffff)
3249 rpool->counter.addr32[2] =
3250 htonl(random());
3251 else
3252 break;
3253 if (rmask->addr32[1] != 0xffffffff)
3254 rpool->counter.addr32[1] =
3255 htonl(random());
3256 else
3257 break;
3258 if (rmask->addr32[0] != 0xffffffff)
3259 rpool->counter.addr32[0] =
3260 htonl(random());
3261 break;
3262 #endif /* INET6 */
3263 }
3264 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
3265 PF_ACPY(init_addr, naddr, af);
3266
3267 } else {
3268 PF_AINC(&rpool->counter, af);
3269 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
3270 }
3271 break;
3272 case PF_POOL_SRCHASH:
3273 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
3274 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
3275 break;
3276 case PF_POOL_ROUNDROBIN:
3277 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3278 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
3279 &rpool->tblidx, &rpool->counter,
3280 &raddr, &rmask, af))
3281 goto get_addr;
3282 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3283 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
3284 &rpool->tblidx, &rpool->counter,
3285 &raddr, &rmask, af))
3286 goto get_addr;
3287 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
3288 goto get_addr;
3289
3290 try_next:
3291 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
3292 rpool->cur = TAILQ_FIRST(&rpool->list);
3293 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3294 rpool->tblidx = -1;
3295 if (pfr_pool_get(rpool->cur->addr.p.tbl,
3296 &rpool->tblidx, &rpool->counter,
3297 &raddr, &rmask, af)) {
3298 /* table contains no address of type 'af' */
3299 if (rpool->cur != acur)
3300 goto try_next;
3301 return (1);
3302 }
3303 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3304 rpool->tblidx = -1;
3305 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
3306 &rpool->tblidx, &rpool->counter,
3307 &raddr, &rmask, af)) {
3308 /* table contains no address of type 'af' */
3309 if (rpool->cur != acur)
3310 goto try_next;
3311 return (1);
3312 }
3313 } else {
3314 raddr = &rpool->cur->addr.v.a.addr;
3315 rmask = &rpool->cur->addr.v.a.mask;
3316 PF_ACPY(&rpool->counter, raddr, af);
3317 }
3318
3319 get_addr:
3320 PF_ACPY(naddr, &rpool->counter, af);
3321 if (init_addr != NULL && PF_AZERO(init_addr, af))
3322 PF_ACPY(init_addr, naddr, af);
3323 PF_AINC(&rpool->counter, af);
3324 break;
3325 }
3326 if (*sn != NULL)
3327 PF_ACPY(&(*sn)->raddr, naddr, af);
3328
3329 if (pf_status.debug >= PF_DEBUG_MISC &&
3330 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
3331 printf("pf_map_addr: selected address ");
3332 pf_print_host(naddr, 0, af);
3333 printf("\n");
3334 }
3335
3336 return (0);
3337 }
3338
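/*
 * Pick a translated source address and source port for a NAT rule.  The
 * address comes from pf_map_addr(); the port is chosen by starting at a
 * random point in the rule's proxy port range and probing linearly in
 * both directions for a port that does not collide with an existing
 * state.  UDP traffic from the IKE port keeps its source port, and UDP
 * and TCP flows may reuse the binding of an existing state created by
 * the same rule for the same source address and source port.
 */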
3339 #ifndef NO_APPLE_EXTENSIONS
3340 static int
3341 pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
3342 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3343 union pf_state_xport *dxport, struct pf_addr *naddr,
3344 union pf_state_xport *nxport, struct pf_src_node **sn)
3345 #else
3346 int
3347 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
3348 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
3349 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
3350 struct pf_src_node **sn)
3351 #endif
3352 {
3353 #pragma unused(kif)
3354 struct pf_state_key_cmp key;
3355 struct pf_addr init_addr;
3356 #ifndef NO_APPLE_EXTENSIONS
3357 unsigned int cut;
3358 sa_family_t af = pd->af;
3359 u_int8_t proto = pd->proto;
3360 unsigned int low = r->rpool.proxy_port[0];
3361 unsigned int high = r->rpool.proxy_port[1];
3362 #else
3363 u_int16_t cut;
3364 #endif
3365
3366 bzero(&init_addr, sizeof (init_addr));
3367 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3368 return (1);
3369
3370 if (proto == IPPROTO_ICMP) {
3371 low = 1;
3372 high = 65535;
3373 }
3374
3375 #ifndef NO_APPLE_EXTENSIONS
3376 if (!nxport)
3377 return (0); /* No output necessary. */
3378
3379 /*--- Special mapping rules for UDP ---*/
3380 if (proto == IPPROTO_UDP) {
3381
3382 /*--- Never float IKE source port ---*/
3383 if (ntohs(sxport->port) == PF_IKE_PORT) {
3384 nxport->port = sxport->port;
3385 return (0);
3386 }
3387
3388 /*--- Apply exterior mapping options ---*/
3389 if (r->extmap > PF_EXTMAP_APD) {
3390 struct pf_state *s;
3391
3392 TAILQ_FOREACH(s, &state_list, entry_list) {
3393 struct pf_state_key *sk = s->state_key;
3394 if (!sk)
3395 continue;
3396 if (s->nat_rule.ptr != r)
3397 continue;
3398 if (sk->proto != IPPROTO_UDP || sk->af != af)
3399 continue;
3400 if (sk->lan.xport.port != sxport->port)
3401 continue;
3402 if (PF_ANEQ(&sk->lan.addr, saddr, af))
3403 continue;
3404 if (r->extmap < PF_EXTMAP_EI &&
3405 PF_ANEQ(&sk->ext.addr, daddr, af))
3406 continue;
3407
3408 nxport->port = sk->gwy.xport.port;
3409 return (0);
3410 }
3411 }
3412 } else if (proto == IPPROTO_TCP) {
3413 struct pf_state* s;
3414 /*
3415 * APPLE MODIFICATION: <rdar://problem/6546358>
3416 * Fix allows NAT to use a single binding for TCP sessions
3417 * with the same source IP and source port.
3418 */
3419 TAILQ_FOREACH(s, &state_list, entry_list) {
3420 struct pf_state_key* sk = s->state_key;
3421 if (!sk)
3422 continue;
3423 if (s->nat_rule.ptr != r)
3424 continue;
3425 if (sk->proto != IPPROTO_TCP || sk->af != af)
3426 continue;
3427 if (sk->lan.xport.port != sxport->port)
3428 continue;
3429 if (!(PF_AEQ(&sk->lan.addr, saddr, af)))
3430 continue;
3431 nxport->port = sk->gwy.xport.port;
3432 return (0);
3433 }
3434 }
3435 #endif
3436 do {
3437 key.af = af;
3438 key.proto = proto;
3439 PF_ACPY(&key.ext.addr, daddr, key.af);
3440 PF_ACPY(&key.gwy.addr, naddr, key.af);
3441 #ifndef NO_APPLE_EXTENSIONS
3442 switch (proto) {
3443 case IPPROTO_UDP:
3444 key.proto_variant = r->extfilter;
3445 break;
3446 default:
3447 key.proto_variant = 0;
3448 break;
3449 }
3450 if (dxport)
3451 key.ext.xport = *dxport;
3452 else
3453 memset(&key.ext.xport, 0, sizeof (key.ext.xport));
3454 #else
3455 key.ext.port = dport;
3456 #endif
3457 /*
3458 * Port search: start at a random port and step through the
3459 * range, similar to the port loop in in_pcbbind().
3460 */
3461 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
3462 proto == IPPROTO_ICMP)) {
3463 #ifndef NO_APPLE_EXTENSIONS
3464 if (dxport)
3465 key.gwy.xport = *dxport;
3466 else
3467 memset(&key.gwy.xport, 0,
3468 sizeof (key.ext.xport));
3469 #else
3470 key.gwy.port = dport;
3471 #endif
3472 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3473 return (0);
3474 } else if (low == 0 && high == 0) {
3475 #ifndef NO_APPLE_EXTENSIONS
3476 key.gwy.xport = *nxport;
3477 #else
3478 key.gwy.port = *nport;
3479 #endif
3480 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3481 return (0);
3482 } else if (low == high) {
3483 #ifndef NO_APPLE_EXTENSIONS
3484 key.gwy.xport.port = htons(low);
3485 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
3486 nxport->port = htons(low);
3487 return (0);
3488 }
3489 #else
3490 key.gwy.port = htons(low);
3491 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
3492 *nport = htons(low);
3493 return (0);
3494 }
3495 #endif
3496 } else {
3497 #ifndef NO_APPLE_EXTENSIONS
3498 unsigned int tmp;
3499 #else
3500 u_int16_t tmp;
3501 #endif
3502 if (low > high) {
3503 tmp = low;
3504 low = high;
3505 high = tmp;
3506 }
3507 /* low < high */
3508 cut = htonl(random()) % (1 + high - low) + low;
3509 /* low <= cut <= high */
3510 for (tmp = cut; tmp <= high; ++(tmp)) {
3511 #ifndef NO_APPLE_EXTENSIONS
3512 key.gwy.xport.port = htons(tmp);
3513 if (pf_find_state_all(&key, PF_IN, NULL) ==
3514 NULL) {
3515 nxport->port = htons(tmp);
3516 return (0);
3517 }
3518 #else
3519 key.gwy.port = htons(tmp);
3520 if (pf_find_state_all(&key, PF_IN, NULL) ==
3521 NULL) {
3522 *nport = htons(tmp);
3523 return (0);
3524 }
3525 #endif
3526 }
3527 for (tmp = cut - 1; tmp >= low; --(tmp)) {
3528 #ifndef NO_APPLE_EXTENSIONS
3529 key.gwy.xport.port = htons(tmp);
3530 if (pf_find_state_all(&key, PF_IN, NULL) ==
3531 NULL) {
3532 nxport->port = htons(tmp);
3533 return (0);
3534 }
3535 #else
3536 key.gwy.port = htons(tmp);
3537 if (pf_find_state_all(&key, PF_IN, NULL) ==
3538 NULL) {
3539 *nport = htons(tmp);
3540 return (0);
3541 }
3542 #endif
3543 }
3544 }
3545
3546 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
3547 case PF_POOL_RANDOM:
3548 case PF_POOL_ROUNDROBIN:
3549 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3550 return (1);
3551 break;
3552 case PF_POOL_NONE:
3553 case PF_POOL_SRCHASH:
3554 case PF_POOL_BITMASK:
3555 default:
3556 return (1);
3557 }
3558 } while (!PF_AEQ(&init_addr, naddr, af));
3559
3560 return (1); /* none available */
3561 }
3562
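/*
 * Walk one translation ruleset (NAT, BINAT or RDR) and return the first
 * matching rule, using the precomputed skip steps to jump over rules
 * that cannot match.  A matching "no nat"/"no rdr"/"no binat" rule
 * yields NULL, i.e. no translation from this ruleset.
 */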
3563 #ifndef NO_APPLE_EXTENSIONS
3564 static struct pf_rule *
3565 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
3566 int direction, struct pfi_kif *kif, struct pf_addr *saddr,
3567 union pf_state_xport *sxport, struct pf_addr *daddr,
3568 union pf_state_xport *dxport, int rs_num)
3569 #else
3570 struct pf_rule *
3571 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
3572 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
3573 struct pf_addr *daddr, u_int16_t dport, int rs_num)
3574 #endif
3575 {
3576 struct pf_rule *r, *rm = NULL;
3577 struct pf_ruleset *ruleset = NULL;
3578 int tag = -1;
3579 unsigned int rtableid = IFSCOPE_NONE;
3580 int asd = 0;
3581
3582 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
3583 while (r && rm == NULL) {
3584 struct pf_rule_addr *src = NULL, *dst = NULL;
3585 struct pf_addr_wrap *xdst = NULL;
3586 #ifndef NO_APPLE_EXTENSIONS
3587 struct pf_addr_wrap *xsrc = NULL;
3588 #endif
3589
3590 if (r->action == PF_BINAT && direction == PF_IN) {
3591 src = &r->dst;
3592 if (r->rpool.cur != NULL)
3593 xdst = &r->rpool.cur->addr;
3594 #ifndef NO_APPLE_EXTENSIONS
3595 } else if (r->action == PF_RDR && direction == PF_OUT) {
3596 dst = &r->src;
3597 src = &r->dst;
3598 if (r->rpool.cur != NULL)
3599 xsrc = &r->rpool.cur->addr;
3600 #endif
3601 } else {
3602 src = &r->src;
3603 dst = &r->dst;
3604 }
3605
3606 r->evaluations++;
3607 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3608 r = r->skip[PF_SKIP_IFP].ptr;
3609 else if (r->direction && r->direction != direction)
3610 r = r->skip[PF_SKIP_DIR].ptr;
3611 else if (r->af && r->af != pd->af)
3612 r = r->skip[PF_SKIP_AF].ptr;
3613 else if (r->proto && r->proto != pd->proto)
3614 r = r->skip[PF_SKIP_PROTO].ptr;
3615 #ifndef NO_APPLE_EXTENSIONS
3616 else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL))
3617 r = TAILQ_NEXT(r, entries);
3618 else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
3619 src->neg, kif))
3620 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
3621 PF_SKIP_DST_ADDR].ptr;
3622 else if (!pf_match_xport(r->proto,
3623 r->proto_variant, &src->xport, sxport))
3624 #else
3625 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
3626 src->neg, kif))
3627 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
3628 PF_SKIP_DST_ADDR].ptr;
3629 else if (src->port_op && !pf_match_port(src->port_op,
3630 src->port[0], src->port[1], sport))
3631 #endif
3632 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
3633 PF_SKIP_DST_PORT].ptr;
3634 else if (dst != NULL &&
3635 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
3636 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3637 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
3638 0, NULL))
3639 r = TAILQ_NEXT(r, entries);
3640 #ifndef NO_APPLE_EXTENSIONS
3641 else if (dst && !pf_match_xport(r->proto, r->proto_variant,
3642 &dst->xport, dxport))
3643 #else
3644 else if (dst != NULL && dst->port_op &&
3645 !pf_match_port(dst->port_op, dst->port[0],
3646 dst->port[1], dport))
3647 #endif
3648 r = r->skip[PF_SKIP_DST_PORT].ptr;
3649 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
3650 r = TAILQ_NEXT(r, entries);
3651 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
3652 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
3653 off, pd->hdr.tcp), r->os_fingerprint)))
3654 r = TAILQ_NEXT(r, entries);
3655 else {
3656 if (r->tag)
3657 tag = r->tag;
3658 if (PF_RTABLEID_IS_VALID(r->rtableid))
3659 rtableid = r->rtableid;
3660 if (r->anchor == NULL) {
3661 rm = r;
3662 } else
3663 pf_step_into_anchor(&asd, &ruleset, rs_num,
3664 &r, NULL, NULL);
3665 }
3666 if (r == NULL)
3667 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
3668 NULL, NULL);
3669 }
3670 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid))
3671 return (NULL);
3672 if (rm != NULL && (rm->action == PF_NONAT ||
3673 rm->action == PF_NORDR || rm->action == PF_NOBINAT))
3674 return (NULL);
3675 return (rm);
3676 }
3677
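/*
 * Find the translation rule for a packet: outbound packets are checked
 * against the BINAT ruleset first and then NAT (with RDR also consulted
 * in the extended build), inbound packets against RDR and then BINAT.
 * For the matching rule the translated address (and port, where
 * applicable) is computed here: NAT via pf_get_sport(), BINAT via the
 * pool or source address mask, RDR via the redirect pool and an optional
 * proxy port range.
 */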
3678 #ifndef NO_APPLE_EXTENSIONS
3679 static struct pf_rule *
3680 pf_get_translation_aux(struct pf_pdesc *pd, struct mbuf *m, int off,
3681 int direction, struct pfi_kif *kif, struct pf_src_node **sn,
3682 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3683 union pf_state_xport *dxport, struct pf_addr *naddr,
3684 union pf_state_xport *nxport)
3685 #else
3686 struct pf_rule *
3687 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
3688 struct pfi_kif *kif, struct pf_src_node **sn,
3689 struct pf_addr *saddr, u_int16_t sport,
3690 struct pf_addr *daddr, u_int16_t dport,
3691 struct pf_addr *naddr, u_int16_t *nport)
3692 #endif
3693 {
3694 struct pf_rule *r = NULL;
3695
3696 #ifndef NO_APPLE_EXTENSIONS
3697 if (direction == PF_OUT) {
3698 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3699 sxport, daddr, dxport, PF_RULESET_BINAT);
3700 if (r == NULL)
3701 r = pf_match_translation(pd, m, off, direction, kif,
3702 saddr, sxport, daddr, dxport, PF_RULESET_RDR);
3703 if (r == NULL)
3704 r = pf_match_translation(pd, m, off, direction, kif,
3705 saddr, sxport, daddr, dxport, PF_RULESET_NAT);
3706 } else {
3707 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3708 sxport, daddr, dxport, PF_RULESET_RDR);
3709 if (r == NULL)
3710 r = pf_match_translation(pd, m, off, direction, kif,
3711 saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
3712 }
3713 #else
3714 if (direction == PF_OUT) {
3715 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3716 sport, daddr, dport, PF_RULESET_BINAT);
3717 if (r == NULL)
3718 r = pf_match_translation(pd, m, off, direction, kif,
3719 saddr, sport, daddr, dport, PF_RULESET_NAT);
3720 } else {
3721 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3722 sport, daddr, dport, PF_RULESET_RDR);
3723 if (r == NULL)
3724 r = pf_match_translation(pd, m, off, direction, kif,
3725 saddr, sport, daddr, dport, PF_RULESET_BINAT);
3726 }
3727 #endif
3728
3729 if (r != NULL) {
3730 switch (r->action) {
3731 case PF_NONAT:
3732 case PF_NOBINAT:
3733 case PF_NORDR:
3734 return (NULL);
3735 case PF_NAT:
3736 #ifndef NO_APPLE_EXTENSIONS
3737 if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
3738 dxport, naddr, nxport, sn)) {
3739 #else
3740 if (pf_get_sport(pd->af, pd->proto, r, saddr,
3741 daddr, dport, naddr, nport, r->rpool.proxy_port[0],
3742 r->rpool.proxy_port[1], sn)) {
3743 #endif
3744 DPFPRINTF(PF_DEBUG_MISC,
3745 ("pf: NAT proxy port allocation "
3746 "(%u-%u) failed\n",
3747 r->rpool.proxy_port[0],
3748 r->rpool.proxy_port[1]));
3749 return (NULL);
3750 }
3751 break;
3752 case PF_BINAT:
3753 switch (direction) {
3754 case PF_OUT:
3755 if (r->rpool.cur->addr.type ==
3756 PF_ADDR_DYNIFTL) {
3757 switch (pd->af) {
3758 #if INET
3759 case AF_INET:
3760 if (r->rpool.cur->addr.p.dyn->
3761 pfid_acnt4 < 1)
3762 return (NULL);
3763 PF_POOLMASK(naddr,
3764 &r->rpool.cur->addr.p.dyn->
3765 pfid_addr4,
3766 &r->rpool.cur->addr.p.dyn->
3767 pfid_mask4,
3768 saddr, AF_INET);
3769 break;
3770 #endif /* INET */
3771 #if INET6
3772 case AF_INET6:
3773 if (r->rpool.cur->addr.p.dyn->
3774 pfid_acnt6 < 1)
3775 return (NULL);
3776 PF_POOLMASK(naddr,
3777 &r->rpool.cur->addr.p.dyn->
3778 pfid_addr6,
3779 &r->rpool.cur->addr.p.dyn->
3780 pfid_mask6,
3781 saddr, AF_INET6);
3782 break;
3783 #endif /* INET6 */
3784 }
3785 } else {
3786 PF_POOLMASK(naddr,
3787 &r->rpool.cur->addr.v.a.addr,
3788 &r->rpool.cur->addr.v.a.mask,
3789 saddr, pd->af);
3790 }
3791 break;
3792 case PF_IN:
3793 if (r->src.addr.type == PF_ADDR_DYNIFTL) {
3794 switch (pd->af) {
3795 #if INET
3796 case AF_INET:
3797 if (r->src.addr.p.dyn->
3798 pfid_acnt4 < 1)
3799 return (NULL);
3800 PF_POOLMASK(naddr,
3801 &r->src.addr.p.dyn->
3802 pfid_addr4,
3803 &r->src.addr.p.dyn->
3804 pfid_mask4,
3805 daddr, AF_INET);
3806 break;
3807 #endif /* INET */
3808 #if INET6
3809 case AF_INET6:
3810 if (r->src.addr.p.dyn->
3811 pfid_acnt6 < 1)
3812 return (NULL);
3813 PF_POOLMASK(naddr,
3814 &r->src.addr.p.dyn->
3815 pfid_addr6,
3816 &r->src.addr.p.dyn->
3817 pfid_mask6,
3818 daddr, AF_INET6);
3819 break;
3820 #endif /* INET6 */
3821 }
3822 } else
3823 PF_POOLMASK(naddr,
3824 &r->src.addr.v.a.addr,
3825 &r->src.addr.v.a.mask, daddr,
3826 pd->af);
3827 break;
3828 }
3829 break;
3830 case PF_RDR: {
3831 #ifndef NO_APPLE_EXTENSIONS
3832 switch (direction) {
3833 case PF_OUT:
3834 if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
3835 switch (pd->af) {
3836 #if INET
3837 case AF_INET:
3838 if (r->dst.addr.p.dyn->
3839 pfid_acnt4 < 1)
3840 return (NULL);
3841 PF_POOLMASK(naddr,
3842 &r->dst.addr.p.dyn->
3843 pfid_addr4,
3844 &r->dst.addr.p.dyn->
3845 pfid_mask4,
3846 daddr, AF_INET);
3847 break;
3848 #endif /* INET */
3849 #if INET6
3850 case AF_INET6:
3851 if (r->dst.addr.p.dyn->
3852 pfid_acnt6 < 1)
3853 return (NULL);
3854 PF_POOLMASK(naddr,
3855 &r->dst.addr.p.dyn->
3856 pfid_addr6,
3857 &r->dst.addr.p.dyn->
3858 pfid_mask6,
3859 daddr, AF_INET6);
3860 break;
3861 #endif /* INET6 */
3862 }
3863 } else {
3864 PF_POOLMASK(naddr,
3865 &r->dst.addr.v.a.addr,
3866 &r->dst.addr.v.a.mask,
3867 daddr, pd->af);
3868 }
3869 if (nxport && dxport)
3870 *nxport = *sxport;
3871 break;
3872 case PF_IN:
3873 if (pf_map_addr(pd->af, r, saddr,
3874 naddr, NULL, sn))
3875 return (NULL);
3876 if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
3877 PF_POOL_BITMASK)
3878 PF_POOLMASK(naddr, naddr,
3879 &r->rpool.cur->addr.v.a.mask, daddr,
3880 pd->af);
3881
3882 if (nxport && dxport) {
3883 if (r->rpool.proxy_port[1]) {
3884 u_int32_t tmp_nport;
3885
3886 tmp_nport =
3887 ((ntohs(dxport->port) -
3888 ntohs(r->dst.xport.range.
3889 port[0])) %
3890 (r->rpool.proxy_port[1] -
3891 r->rpool.proxy_port[0] +
3892 1)) + r->rpool.proxy_port[0];
3893
3894 /* wrap around if necessary */
3895 if (tmp_nport > 65535)
3896 tmp_nport -= 65535;
3897 nxport->port =
3898 htons((u_int16_t)tmp_nport);
3899 } else if (r->rpool.proxy_port[0]) {
3900 nxport->port = htons(r->rpool.
3901 proxy_port[0]);
3902 }
3903 }
3904 break;
3905 }
3906 #else
3907 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
3908 return (NULL);
3909 if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
3910 PF_POOL_BITMASK)
3911 PF_POOLMASK(naddr, naddr,
3912 &r->rpool.cur->addr.v.a.mask, daddr,
3913 pd->af);
3914
3915 if (r->rpool.proxy_port[1]) {
3916 u_int32_t tmp_nport;
3917
3918 tmp_nport = ((ntohs(dport) -
3919 ntohs(r->dst.port[0])) %
3920 (r->rpool.proxy_port[1] -
3921 r->rpool.proxy_port[0] + 1)) +
3922 r->rpool.proxy_port[0];
3923
3924 /* wrap around if necessary */
3925 if (tmp_nport > 65535)
3926 tmp_nport -= 65535;
3927 *nport = htons((u_int16_t)tmp_nport);
3928 } else if (r->rpool.proxy_port[0])
3929 *nport = htons(r->rpool.proxy_port[0]);
3930 #endif
3931 break;
3932 }
3933 default:
3934 return (NULL);
3935 }
3936 }
3937
3938 return (r);
3939 }
3940
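/*
 * Look up the local PCB that owns this flow (used when rules match on
 * the socket owner).  The PCB hash of the corresponding protocol (TCP or
 * UDP) is searched with the connected 4-tuple first and then with
 * wildcard entries; for IPv4, v4-mapped IPv6 sockets are also
 * considered.
 */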
3941 int
3942 pf_socket_lookup(int direction, struct pf_pdesc *pd)
3943 {
3944 struct pf_addr *saddr, *daddr;
3945 u_int16_t sport, dport;
3946 struct inpcbinfo *pi;
3947 struct inpcb *inp = NULL;
3948
3949 if (pd == NULL)
3950 return (-1);
3951 pd->lookup.uid = UID_MAX;
3952 pd->lookup.gid = GID_MAX;
3953 pd->lookup.pid = NO_PID;
3954
3955 switch (pd->proto) {
3956 case IPPROTO_TCP:
3957 if (pd->hdr.tcp == NULL)
3958 return (-1);
3959 sport = pd->hdr.tcp->th_sport;
3960 dport = pd->hdr.tcp->th_dport;
3961 pi = &tcbinfo;
3962 break;
3963 case IPPROTO_UDP:
3964 if (pd->hdr.udp == NULL)
3965 return (-1);
3966 sport = pd->hdr.udp->uh_sport;
3967 dport = pd->hdr.udp->uh_dport;
3968 pi = &udbinfo;
3969 break;
3970 default:
3971 return (-1);
3972 }
3973 if (direction == PF_IN) {
3974 saddr = pd->src;
3975 daddr = pd->dst;
3976 } else {
3977 u_int16_t p;
3978
3979 p = sport;
3980 sport = dport;
3981 dport = p;
3982 saddr = pd->dst;
3983 daddr = pd->src;
3984 }
3985 switch (pd->af) {
3986 #if INET
3987 case AF_INET:
3988 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4, dport,
3989 0, NULL);
3990 #if INET6
3991 if (inp == NULL) {
3992 struct in6_addr s6, d6;
3993
3994 memset(&s6, 0, sizeof (s6));
3995 s6.s6_addr16[5] = htons(0xffff);
3996 memcpy(&s6.s6_addr32[3], &saddr->v4,
3997 sizeof (saddr->v4));
3998
3999 memset(&d6, 0, sizeof (d6));
4000 d6.s6_addr16[5] = htons(0xffff);
4001 memcpy(&d6.s6_addr32[3], &daddr->v4,
4002 sizeof (daddr->v4));
4003
4004 inp = in6_pcblookup_hash(pi, &s6, sport,
4005 &d6, dport, 0, NULL);
4006 if (inp == NULL) {
4007 inp = in_pcblookup_hash(pi, saddr->v4, sport,
4008 daddr->v4, dport, INPLOOKUP_WILDCARD, NULL);
4009 if (inp == NULL) {
4010 inp = in6_pcblookup_hash(pi, &s6, sport,
4011 &d6, dport, INPLOOKUP_WILDCARD,
4012 NULL);
4013 if (inp == NULL)
4014 return (-1);
4015 }
4016 }
4017 }
4018 #else
4019 if (inp == NULL) {
4020 inp = in_pcblookup_hash(pi, saddr->v4, sport,
4021 daddr->v4, dport, INPLOOKUP_WILDCARD, NULL);
4022 if (inp == NULL)
4023 return (-1);
4024 }
4025 #endif /* !INET6 */
4026 break;
4027 #endif /* INET */
4028 #if INET6
4029 case AF_INET6:
4030 inp = in6_pcblookup_hash(pi, &saddr->v6, sport, &daddr->v6,
4031 dport, 0, NULL);
4032 if (inp == NULL) {
4033 inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
4034 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);
4035 if (inp == NULL)
4036 return (-1);
4037 }
4038 break;
4039 #endif /* INET6 */
4040
4041 default:
4042 return (-1);
4043 }
4044
4045 if (inp != NULL)
4046 in_pcb_checkstate(inp, WNT_RELEASE, 0);
4047
4048 return (1);
4049 }
4050
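/*
 * pf_get_wscale() pulls the TCP header plus options into a local buffer
 * and scans for the window-scale option (kind 3, length 3).  The shift
 * count is clamped to TCP_MAX_WINSHIFT and PF_WSCALE_FLAG is OR'ed in so
 * callers can tell "option present with scale 0" apart from "no option".
 * For example, the hypothetical option bytes 03 03 07 would yield
 * 7 | PF_WSCALE_FLAG.
 */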
4051 static u_int8_t
4052 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
4053 {
4054 int hlen;
4055 u_int8_t hdr[60];
4056 u_int8_t *opt, optlen;
4057 u_int8_t wscale = 0;
4058
4059 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
4060 if (hlen <= (int)sizeof (struct tcphdr))
4061 return (0);
4062 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
4063 return (0);
4064 opt = hdr + sizeof (struct tcphdr);
4065 hlen -= sizeof (struct tcphdr);
4066 while (hlen >= 3) {
4067 switch (*opt) {
4068 case TCPOPT_EOL:
4069 case TCPOPT_NOP:
4070 ++opt;
4071 --hlen;
4072 break;
4073 case TCPOPT_WINDOW:
4074 wscale = opt[2];
4075 if (wscale > TCP_MAX_WINSHIFT)
4076 wscale = TCP_MAX_WINSHIFT;
4077 wscale |= PF_WSCALE_FLAG;
4078 /* FALLTHROUGH */
4079 default:
4080 optlen = opt[1];
4081 if (optlen < 2)
4082 optlen = 2;
4083 hlen -= optlen;
4084 opt += optlen;
4085 break;
4086 }
4087 }
4088 return (wscale);
4089 }
4090
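/*
 * pf_get_mss() walks the TCP options the same way, looking for the
 * maximum-segment-size option (kind 2, length 4) and returning its value
 * in host byte order.  It returns 0 if there are no options to parse and
 * the default tcp_mssdflt if options are present but no MSS option is
 * found.  For example, the option bytes 02 04 05 b4 decode to an MSS of
 * 0x05b4 = 1460.
 */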
4091 static u_int16_t
4092 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
4093 {
4094 int hlen;
4095 u_int8_t hdr[60];
4096 u_int8_t *opt, optlen;
4097 u_int16_t mss = tcp_mssdflt;
4098
4099 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
4100 if (hlen <= (int)sizeof (struct tcphdr))
4101 return (0);
4102 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
4103 return (0);
4104 opt = hdr + sizeof (struct tcphdr);
4105 hlen -= sizeof (struct tcphdr);
4106 while (hlen >= TCPOLEN_MAXSEG) {
4107 switch (*opt) {
4108 case TCPOPT_EOL:
4109 case TCPOPT_NOP:
4110 ++opt;
4111 --hlen;
4112 break;
4113 case TCPOPT_MAXSEG:
4114 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
4115 #if BYTE_ORDER != BIG_ENDIAN
4116 NTOHS(mss);
4117 #endif
4118 /* FALLTHROUGH */
4119 default:
4120 optlen = opt[1];
4121 if (optlen < 2)
4122 optlen = 2;
4123 hlen -= optlen;
4124 opt += optlen;
4125 break;
4126 }
4127 }
4128 return (mss);
4129 }
4130
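/*
 * pf_calc_mss() clamps the offered MSS against the MTU of the route to
 * the given address: if_mtu minus the network and TCP headers, never
 * below tcp_mssdflt, never above the peer's offer, and never below 64
 * bytes.  For a hypothetical IPv4 route with a 1500-byte MTU this works
 * out to 1500 - 20 - 20 = 1460.
 */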
4131 static u_int16_t
4132 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
4133 {
4134 #if INET
4135 struct sockaddr_in *dst;
4136 struct route ro;
4137 #endif /* INET */
4138 #if INET6
4139 struct sockaddr_in6 *dst6;
4140 struct route_in6 ro6;
4141 #endif /* INET6 */
4142 struct rtentry *rt = NULL;
4143 int hlen;
4144 u_int16_t mss = tcp_mssdflt;
4145
4146 switch (af) {
4147 #if INET
4148 case AF_INET:
4149 hlen = sizeof (struct ip);
4150 bzero(&ro, sizeof (ro));
4151 dst = (struct sockaddr_in *)&ro.ro_dst;
4152 dst->sin_family = AF_INET;
4153 dst->sin_len = sizeof (*dst);
4154 dst->sin_addr = addr->v4;
4155 rtalloc(&ro);
4156 rt = ro.ro_rt;
4157 break;
4158 #endif /* INET */
4159 #if INET6
4160 case AF_INET6:
4161 hlen = sizeof (struct ip6_hdr);
4162 bzero(&ro6, sizeof (ro6));
4163 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
4164 dst6->sin6_family = AF_INET6;
4165 dst6->sin6_len = sizeof (*dst6);
4166 dst6->sin6_addr = addr->v6;
4167 rtalloc((struct route *)&ro6);
4168 rt = ro6.ro_rt;
4169 break;
4170 #endif /* INET6 */
4171 default:
4172 panic("pf_calc_mss: not AF_INET or AF_INET6!");
4173 return (0);
4174 }
4175
4176 if (rt && rt->rt_ifp) {
4177 mss = rt->rt_ifp->if_mtu - hlen - sizeof (struct tcphdr);
4178 mss = max(tcp_mssdflt, mss);
4179 RTFREE(rt);
4180 }
4181 mss = min(mss, offer);
4182 mss = max(mss, 64); /* sanity - at least max opt space */
4183 return (mss);
4184 }
4185
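/*
 * pf_set_rt_ifp() resolves the route-to interface for a new state.  When
 * the rule has no route option, or uses PF_FASTROUTE, s->rt_kif stays
 * NULL; otherwise pf_map_addr() selects the next-hop address from the
 * rule's pool into s->rt_addr and the pool's current interface is
 * recorded in s->rt_kif.
 */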
4186 static void
4187 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
4188 {
4189 struct pf_rule *r = s->rule.ptr;
4190
4191 s->rt_kif = NULL;
4192 if (!r->rt || r->rt == PF_FASTROUTE)
4193 return;
4194 switch (s->state_key->af) {
4195 #if INET
4196 case AF_INET:
4197 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
4198 &s->nat_src_node);
4199 s->rt_kif = r->rpool.cur->kif;
4200 break;
4201 #endif /* INET */
4202 #if INET6
4203 case AF_INET6:
4204 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
4205 &s->nat_src_node);
4206 s->rt_kif = r->rpool.cur->kif;
4207 break;
4208 #endif /* INET6 */
4209 }
4210 }
4211
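/*
 * State keys are reference counted.  pf_attach_state() links a state onto
 * the key's list (if-bound states ahead of floating ones) and bumps
 * sk->refcnt; pf_detach_state() unlinks it and, once the count reaches
 * zero, removes the key from the lan_ext and ext_gwy trees (unless a
 * PF_DT_SKIP_* flag says the caller already did) and frees it together
 * with any attached app state.  pf_alloc_state_key() below allocates a
 * zeroed key and immediately attaches the new state at the head of its
 * list.
 */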
4212 static void
4213 pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
4214 {
4215 s->state_key = sk;
4216 sk->refcnt++;
4217
4218 /* list is sorted, if-bound states before floating */
4219 if (tail)
4220 TAILQ_INSERT_TAIL(&sk->states, s, next);
4221 else
4222 TAILQ_INSERT_HEAD(&sk->states, s, next);
4223 }
4224
4225 static void
4226 pf_detach_state(struct pf_state *s, int flags)
4227 {
4228 struct pf_state_key *sk = s->state_key;
4229
4230 if (sk == NULL)
4231 return;
4232
4233 s->state_key = NULL;
4234 TAILQ_REMOVE(&sk->states, s, next);
4235 if (--sk->refcnt == 0) {
4236 if (!(flags & PF_DT_SKIP_EXTGWY))
4237 RB_REMOVE(pf_state_tree_ext_gwy,
4238 &pf_statetbl_ext_gwy, sk);
4239 if (!(flags & PF_DT_SKIP_LANEXT))
4240 RB_REMOVE(pf_state_tree_lan_ext,
4241 &pf_statetbl_lan_ext, sk);
4242 #ifndef NO_APPLE_EXTENSIONS
4243 if (sk->app_state)
4244 pool_put(&pf_app_state_pl, sk->app_state);
4245 #endif
4246 pool_put(&pf_state_key_pl, sk);
4247 }
4248 }
4249
4250 struct pf_state_key *
4251 pf_alloc_state_key(struct pf_state *s)
4252 {
4253 struct pf_state_key *sk;
4254
4255 if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL)
4256 return (NULL);
4257 bzero(sk, sizeof (*sk));
4258 TAILQ_INIT(&sk->states);
4259 pf_attach_state(sk, s, 0);
4260
4261 return (sk);
4262 }
4263
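/*
 * pf_tcp_iss() generates the initial sequence numbers used for sequence
 * modulation and the SYN proxy.  It hashes the connection's ports and
 * addresses with an MD5 context pre-seeded from a boot-time random secret
 * (similar in spirit to RFC 1948) and adds random() plus an offset that
 * advances by 4096 on every call, making the result hard to predict while
 * avoiding collisions across successive connections.
 */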
4264 static u_int32_t
4265 pf_tcp_iss(struct pf_pdesc *pd)
4266 {
4267 MD5_CTX ctx;
4268 u_int32_t digest[4];
4269
4270 if (pf_tcp_secret_init == 0) {
4271 read_random(pf_tcp_secret, sizeof (pf_tcp_secret));
4272 MD5Init(&pf_tcp_secret_ctx);
4273 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
4274 sizeof (pf_tcp_secret));
4275 pf_tcp_secret_init = 1;
4276 }
4277 ctx = pf_tcp_secret_ctx;
4278
4279 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
4280 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
4281 if (pd->af == AF_INET6) {
4282 MD5Update(&ctx, (char *)&pd->src->v6, sizeof (struct in6_addr));
4283 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof (struct in6_addr));
4284 } else {
4285 MD5Update(&ctx, (char *)&pd->src->v4, sizeof (struct in_addr));
4286 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof (struct in_addr));
4287 }
4288 MD5Final((u_char *)digest, &ctx);
4289 pf_tcp_iss_off += 4096;
4290 return (digest[0] + random() + pf_tcp_iss_off);
4291 }
4292
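/*
 * pf_test_rule() is the rule walk for packets that do not match an
 * existing state.  In outline it: applies any BINAT/NAT/RDR translation
 * and rewrites the header in place, walks the active filter ruleset using
 * the precomputed skip steps, logs if requested, sends a TCP RST or ICMP
 * error for blocked packets whose rule carries a return option, and
 * finally, for pass rules that keep state (or whenever a NAT rule
 * matched), builds the pf_state/pf_state_key pair, attaches PPTP or IKE
 * app states where applicable, and answers the SYN itself when the rule
 * is a synproxy.
 */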
4293 static int
4294 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
4295 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
4296 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
4297 struct ifqueue *ifq)
4298 {
4299 #pragma unused(h)
4300 struct pf_rule *nr = NULL;
4301 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4302 #ifdef NO_APPLE_EXTENSIONS
4303 u_int16_t bport, nport = 0;
4304 #endif
4305 sa_family_t af = pd->af;
4306 struct pf_rule *r, *a = NULL;
4307 struct pf_ruleset *ruleset = NULL;
4308 struct pf_src_node *nsn = NULL;
4309 struct tcphdr *th = pd->hdr.tcp;
4310 u_short reason;
4311 int rewrite = 0, hdrlen = 0;
4312 int tag = -1;
4313 unsigned int rtableid = IFSCOPE_NONE;
4314 int asd = 0;
4315 int match = 0;
4316 int state_icmp = 0;
4317 u_int16_t mss = tcp_mssdflt;
4318 #ifdef NO_APPLE_EXTENSIONS
4319 u_int16_t sport, dport;
4320 #endif
4321 u_int8_t icmptype = 0, icmpcode = 0;
4322
4323 #ifndef NO_APPLE_EXTENSIONS
4324 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
4325 union pf_state_xport bxport, nxport, sxport, dxport;
4326 #endif
4327
4328 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
4329
4330 if (direction == PF_IN && pf_check_congestion(ifq)) {
4331 REASON_SET(&reason, PFRES_CONGEST);
4332 return (PF_DROP);
4333 }
4334
4335 #ifndef NO_APPLE_EXTENSIONS
4336 hdrlen = 0;
4337 sxport.spi = 0;
4338 dxport.spi = 0;
4339 nxport.spi = 0;
4340 #else
4341 sport = dport = hdrlen = 0;
4342 #endif
4343
4344 switch (pd->proto) {
4345 case IPPROTO_TCP:
4346 #ifndef NO_APPLE_EXTENSIONS
4347 sxport.port = th->th_sport;
4348 dxport.port = th->th_dport;
4349 #else
4350 sport = th->th_sport;
4351 dport = th->th_dport;
4352 #endif
4353 hdrlen = sizeof (*th);
4354 break;
4355 case IPPROTO_UDP:
4356 #ifndef NO_APPLE_EXTENSIONS
4357 sxport.port = pd->hdr.udp->uh_sport;
4358 dxport.port = pd->hdr.udp->uh_dport;
4359 #else
4360 sport = pd->hdr.udp->uh_sport;
4361 dport = pd->hdr.udp->uh_dport;
4362 #endif
4363 hdrlen = sizeof (*pd->hdr.udp);
4364 break;
4365 #if INET
4366 case IPPROTO_ICMP:
4367 if (pd->af != AF_INET)
4368 break;
4369 #ifndef NO_APPLE_EXTENSIONS
4370 sxport.port = dxport.port = pd->hdr.icmp->icmp_id;
4371 hdrlen = ICMP_MINLEN;
4372 #else
4373 sport = dport = pd->hdr.icmp->icmp_id;
4374 #endif
4375 icmptype = pd->hdr.icmp->icmp_type;
4376 icmpcode = pd->hdr.icmp->icmp_code;
4377
4378 if (icmptype == ICMP_UNREACH ||
4379 icmptype == ICMP_SOURCEQUENCH ||
4380 icmptype == ICMP_REDIRECT ||
4381 icmptype == ICMP_TIMXCEED ||
4382 icmptype == ICMP_PARAMPROB)
4383 state_icmp++;
4384 break;
4385 #endif /* INET */
4386 #if INET6
4387 case IPPROTO_ICMPV6:
4388 if (pd->af != AF_INET6)
4389 break;
4390 #ifndef NO_APPLE_EXTENSIONS
4391 sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id;
4392 #else
4393 sport = dport = pd->hdr.icmp6->icmp6_id;
4394 #endif
4395 hdrlen = sizeof (*pd->hdr.icmp6);
4396 icmptype = pd->hdr.icmp6->icmp6_type;
4397 icmpcode = pd->hdr.icmp6->icmp6_code;
4398
4399 if (icmptype == ICMP6_DST_UNREACH ||
4400 icmptype == ICMP6_PACKET_TOO_BIG ||
4401 icmptype == ICMP6_TIME_EXCEEDED ||
4402 icmptype == ICMP6_PARAM_PROB)
4403 state_icmp++;
4404 break;
4405 #endif /* INET6 */
4406 #ifndef NO_APPLE_EXTENSIONS
4407 case IPPROTO_GRE:
4408 if (pd->proto_variant == PF_GRE_PPTP_VARIANT) {
4409 sxport.call_id = dxport.call_id =
4410 pd->hdr.grev1->call_id;
4411 hdrlen = sizeof (*pd->hdr.grev1);
4412 }
4413 break;
4414 case IPPROTO_ESP:
4415 sxport.spi = 0;
4416 dxport.spi = pd->hdr.esp->spi;
4417 hdrlen = sizeof (*pd->hdr.esp);
4418 break;
4419 #endif
4420 }
4421
4422 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4423
4424 if (direction == PF_OUT) {
4425 #ifndef NO_APPLE_EXTENSIONS
4426 bxport = nxport = sxport;
4427 /* check outgoing packet for BINAT/NAT */
4428 if ((nr = pf_get_translation_aux(pd, m, off, PF_OUT, kif, &nsn,
4429 saddr, &sxport, daddr, &dxport, &pd->naddr, &nxport)) !=
4430 NULL) {
4431 #else
4432 bport = nport = sport;
4433 /* check outgoing packet for BINAT/NAT */
4434 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
4435 saddr, sport, daddr, dport, &pd->naddr, &nport)) != NULL) {
4436 #endif
4437 PF_ACPY(&pd->baddr, saddr, af);
4438 switch (pd->proto) {
4439 case IPPROTO_TCP:
4440 #ifndef NO_APPLE_EXTENSIONS
4441 pf_change_ap(direction, pd->mp, saddr,
4442 &th->th_sport, pd->ip_sum, &th->th_sum,
4443 &pd->naddr, nxport.port, 0, af);
4444 sxport.port = th->th_sport;
4445 #else
4446 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
4447 &th->th_sum, &pd->naddr, nport, 0, af);
4448 sport = th->th_sport;
4449 #endif
4450 rewrite++;
4451 break;
4452 case IPPROTO_UDP:
4453 #ifndef NO_APPLE_EXTENSIONS
4454 pf_change_ap(direction, pd->mp, saddr,
4455 &pd->hdr.udp->uh_sport, pd->ip_sum,
4456 &pd->hdr.udp->uh_sum, &pd->naddr,
4457 nxport.port, 1, af);
4458 sxport.port = pd->hdr.udp->uh_sport;
4459 #else
4460 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
4461 pd->ip_sum, &pd->hdr.udp->uh_sum,
4462 &pd->naddr, nport, 1, af);
4463 sport = pd->hdr.udp->uh_sport;
4464 #endif
4465 rewrite++;
4466 break;
4467 #if INET
4468 case IPPROTO_ICMP:
4469 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
4470 pd->naddr.v4.s_addr, 0);
4471 #ifndef NO_APPLE_EXTENSIONS
4472 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
4473 pd->hdr.icmp->icmp_cksum, sxport.port,
4474 nxport.port, 0);
4475 pd->hdr.icmp->icmp_id = nxport.port;
4476 ++rewrite;
4477 #else
4478 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
4479 pd->hdr.icmp->icmp_cksum, sport, nport, 0);
4480 pd->hdr.icmp->icmp_id = nport;
4481 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
4482 #endif
4483 break;
4484 #endif /* INET */
4485 #if INET6
4486 case IPPROTO_ICMPV6:
4487 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
4488 &pd->naddr, 0);
4489 rewrite++;
4490 break;
4491 #endif /* INET6 */
4492 #ifndef NO_APPLE_EXTENSIONS
4493 case IPPROTO_GRE:
4494 switch (af) {
4495 #if INET
4496 case AF_INET:
4497 pf_change_a(&saddr->v4.s_addr,
4498 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4499 break;
4500 #endif /* INET */
4501 #if INET6
4502 case AF_INET6:
4503 PF_ACPY(saddr, &pd->naddr, AF_INET6);
4504 break;
4505 #endif /* INET6 */
4506 }
4507 ++rewrite;
4508 break;
4509 case IPPROTO_ESP:
4510 bxport.spi = 0;
4511 switch (af) {
4512 #if INET
4513 case AF_INET:
4514 pf_change_a(&saddr->v4.s_addr,
4515 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4516 break;
4517 #endif /* INET */
4518 #if INET6
4519 case AF_INET6:
4520 PF_ACPY(saddr, &pd->naddr, AF_INET6);
4521 break;
4522 #endif /* INET6 */
4523 }
4524 break;
4525 #endif
4526 default:
4527 switch (af) {
4528 #if INET
4529 case AF_INET:
4530 pf_change_a(&saddr->v4.s_addr,
4531 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4532 break;
4533 #endif /* INET */
4534 #if INET6
4535 case AF_INET6:
4536 PF_ACPY(saddr, &pd->naddr, af);
4537 break;
4538 #endif /* INET6 */
4539 }
4540 break;
4541 }
4542
4543 if (nr->natpass)
4544 r = NULL;
4545 pd->nat_rule = nr;
4546 }
4547 } else {
4548 #ifndef NO_APPLE_EXTENSIONS
4549 bxport.port = nxport.port = dxport.port;
4550 /* check incoming packet for BINAT/RDR */
4551 if ((nr = pf_get_translation_aux(pd, m, off, PF_IN, kif, &nsn,
4552 saddr, &sxport, daddr, &dxport, &pd->naddr, &nxport)) !=
4553 NULL) {
4554 #else
4555 bport = nport = dport;
4556 /* check incoming packet for BINAT/RDR */
4557 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
4558 saddr, sport, daddr, dport, &pd->naddr, &nport)) != NULL) {
4559 #endif
4560 PF_ACPY(&pd->baddr, daddr, af);
4561 switch (pd->proto) {
4562 case IPPROTO_TCP:
4563 #ifndef NO_APPLE_EXTENSIONS
4564 pf_change_ap(direction, pd->mp, daddr,
4565 &th->th_dport, pd->ip_sum, &th->th_sum,
4566 &pd->naddr, nxport.port, 0, af);
4567 dxport.port = th->th_dport;
4568 #else
4569 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
4570 &th->th_sum, &pd->naddr, nport, 0, af);
4571 dport = th->th_dport;
4572 #endif
4573 rewrite++;
4574 break;
4575 case IPPROTO_UDP:
4576 #ifndef NO_APPLE_EXTENSIONS
4577 pf_change_ap(direction, pd->mp, daddr,
4578 &pd->hdr.udp->uh_dport, pd->ip_sum,
4579 &pd->hdr.udp->uh_sum, &pd->naddr,
4580 nxport.port, 1, af);
4581 dxport.port = pd->hdr.udp->uh_dport;
4582 #else
4583 pf_change_ap(daddr,
4584 &pd->hdr.udp->uh_dport,
4585 pd->ip_sum, &pd->hdr.udp->uh_sum,
4586 &pd->naddr, nport, 1, af);
4587 dport = pd->hdr.udp->uh_dport;
4588 #endif
4589 rewrite++;
4590 break;
4591 #if INET
4592 case IPPROTO_ICMP:
4593 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
4594 pd->naddr.v4.s_addr, 0);
4595 break;
4596 #endif /* INET */
4597 #if INET6
4598 case IPPROTO_ICMPV6:
4599 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
4600 &pd->naddr, 0);
4601 rewrite++;
4602 break;
4603 #endif /* INET6 */
4604 #ifndef NO_APPLE_EXTENSIONS
4605 case IPPROTO_GRE:
4606 if (pd->proto_variant == PF_GRE_PPTP_VARIANT)
4607 grev1->call_id = nxport.call_id;
4608
4609 switch (af) {
4610 #if INET
4611 case AF_INET:
4612 pf_change_a(&daddr->v4.s_addr,
4613 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4614 break;
4615 #endif /* INET */
4616 #if INET6
4617 case AF_INET6:
4618 PF_ACPY(daddr, &pd->naddr, AF_INET6);
4619 break;
4620 #endif /* INET6 */
4621 }
4622 ++rewrite;
4623 break;
4624 case IPPROTO_ESP:
4625 switch (af) {
4626 #if INET
4627 case AF_INET:
4628 pf_change_a(&daddr->v4.s_addr,
4629 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4630 break;
4631 #endif /* INET */
4632 #if INET6
4633 case AF_INET6:
4634 PF_ACPY(daddr, &pd->naddr, AF_INET6);
4635 break;
4636 #endif /* INET6 */
4637 }
4638 break;
4639 #endif
4640 default:
4641 switch (af) {
4642 #if INET
4643 case AF_INET:
4644 pf_change_a(&daddr->v4.s_addr,
4645 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4646 break;
4647 #endif /* INET */
4648 #if INET6
4649 case AF_INET6:
4650 PF_ACPY(daddr, &pd->naddr, af);
4651 break;
4652 #endif /* INET6 */
4653 }
4654 break;
4655 }
4656
4657 if (nr->natpass)
4658 r = NULL;
4659 pd->nat_rule = nr;
4660 }
4661 }
4662
4663 #ifndef NO_APPLE_EXTENSIONS
4664 if (nr && nr->tag > 0)
4665 tag = nr->tag;
4666 #endif
4667
4668 while (r != NULL) {
4669 r->evaluations++;
4670 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4671 r = r->skip[PF_SKIP_IFP].ptr;
4672 else if (r->direction && r->direction != direction)
4673 r = r->skip[PF_SKIP_DIR].ptr;
4674 else if (r->af && r->af != af)
4675 r = r->skip[PF_SKIP_AF].ptr;
4676 else if (r->proto && r->proto != pd->proto)
4677 r = r->skip[PF_SKIP_PROTO].ptr;
4678 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
4679 r->src.neg, kif))
4680 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4681 /* tcp/udp only. port_op always 0 in other cases */
4682 #ifndef NO_APPLE_EXTENSIONS
4683 else if (r->proto == pd->proto &&
4684 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
4685 r->src.xport.range.op &&
4686 !pf_match_port(r->src.xport.range.op,
4687 r->src.xport.range.port[0], r->src.xport.range.port[1],
4688 th->th_sport))
4689 #else
4690 else if (r->src.port_op && !pf_match_port(r->src.port_op,
4691 r->src.port[0], r->src.port[1], th->th_sport))
4692 #endif
4693 r = r->skip[PF_SKIP_SRC_PORT].ptr;
4694 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
4695 r->dst.neg, NULL))
4696 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4697 /* tcp/udp only. port_op always 0 in other cases */
4698 #ifndef NO_APPLE_EXTENSIONS
4699 else if (r->proto == pd->proto &&
4700 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
4701 r->dst.xport.range.op &&
4702 !pf_match_port(r->dst.xport.range.op,
4703 r->dst.xport.range.port[0], r->dst.xport.range.port[1],
4704 th->th_dport))
4705 #else
4706 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
4707 r->dst.port[0], r->dst.port[1], th->th_dport))
4708 #endif
4709 r = r->skip[PF_SKIP_DST_PORT].ptr;
4710 /* icmp only. type always 0 in other cases */
4711 else if (r->type && r->type != icmptype + 1)
4712 r = TAILQ_NEXT(r, entries);
4713 /* icmp only. type always 0 in other cases */
4714 else if (r->code && r->code != icmpcode + 1)
4715 r = TAILQ_NEXT(r, entries);
4716 else if (r->tos && !(r->tos == pd->tos))
4717 r = TAILQ_NEXT(r, entries);
4718 else if (r->rule_flag & PFRULE_FRAGMENT)
4719 r = TAILQ_NEXT(r, entries);
4720 else if (pd->proto == IPPROTO_TCP &&
4721 (r->flagset & th->th_flags) != r->flags)
4722 r = TAILQ_NEXT(r, entries);
4723 /* tcp/udp only. uid.op always 0 in other cases */
4724 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
4725 pf_socket_lookup(direction, pd), 1)) &&
4726 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
4727 pd->lookup.uid))
4728 r = TAILQ_NEXT(r, entries);
4729 /* tcp/udp only. gid.op always 0 in other cases */
4730 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
4731 pf_socket_lookup(direction, pd), 1)) &&
4732 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
4733 pd->lookup.gid))
4734 r = TAILQ_NEXT(r, entries);
4735 else if (r->prob && r->prob <= (random() % (UINT_MAX - 1) + 1))
4736 r = TAILQ_NEXT(r, entries);
4737 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
4738 r = TAILQ_NEXT(r, entries);
4739 else if (r->os_fingerprint != PF_OSFP_ANY &&
4740 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
4741 pf_osfp_fingerprint(pd, m, off, th),
4742 r->os_fingerprint)))
4743 r = TAILQ_NEXT(r, entries);
4744 else {
4745 if (r->tag)
4746 tag = r->tag;
4747 if (PF_RTABLEID_IS_VALID(r->rtableid))
4748 rtableid = r->rtableid;
4749 if (r->anchor == NULL) {
4750 match = 1;
4751 *rm = r;
4752 *am = a;
4753 *rsm = ruleset;
4754 if ((*rm)->quick)
4755 break;
4756 r = TAILQ_NEXT(r, entries);
4757 } else
4758 pf_step_into_anchor(&asd, &ruleset,
4759 PF_RULESET_FILTER, &r, &a, &match);
4760 }
4761 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4762 PF_RULESET_FILTER, &r, &a, &match))
4763 break;
4764 }
4765 r = *rm;
4766 a = *am;
4767 ruleset = *rsm;
4768
4769 REASON_SET(&reason, PFRES_MATCH);
4770
4771 if (r->log || (nr != NULL && nr->log)) {
4772 #ifndef NO_APPLE_EXTENSIONS
4773 if (rewrite > 0) {
4774 if (rewrite < off + hdrlen)
4775 rewrite = off + hdrlen;
4776
4777 m = pf_lazy_makewritable(pd, m, rewrite);
4778 if (!m) {
4779 REASON_SET(&reason, PFRES_MEMORY);
4780 return (PF_DROP);
4781 }
4782
4783 m_copyback(m, off, hdrlen, pd->hdr.any);
4784 }
4785 #else
4786 if (rewrite)
4787 m_copyback(m, off, hdrlen, pd->hdr.any);
4788 #endif
4789 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
4790 a, ruleset, pd);
4791 }
4792
4793 if ((r->action == PF_DROP) &&
4794 ((r->rule_flag & PFRULE_RETURNRST) ||
4795 (r->rule_flag & PFRULE_RETURNICMP) ||
4796 (r->rule_flag & PFRULE_RETURN))) {
4797 /* undo NAT changes, if they have taken place */
4798 if (nr != NULL) {
4799 if (direction == PF_OUT) {
4800 switch (pd->proto) {
4801 case IPPROTO_TCP:
4802 #ifndef NO_APPLE_EXTENSIONS
4803 pf_change_ap(direction, pd->mp, saddr,
4804 &th->th_sport, pd->ip_sum,
4805 &th->th_sum, &pd->baddr,
4806 bxport.port, 0, af);
4807 sxport.port = th->th_sport;
4808 #else
4809 pf_change_ap(saddr, &th->th_sport,
4810 pd->ip_sum, &th->th_sum,
4811 &pd->baddr, bport, 0, af);
4812 sport = th->th_sport;
4813 #endif
4814 rewrite++;
4815 break;
4816 case IPPROTO_UDP:
4817 #ifndef NO_APPLE_EXTENSIONS
4818 pf_change_ap(direction, pd->mp, saddr,
4819 &pd->hdr.udp->uh_sport, pd->ip_sum,
4820 &pd->hdr.udp->uh_sum, &pd->baddr,
4821 bxport.port, 1, af);
4822 sxport.port = pd->hdr.udp->uh_sport;
4823 #else
4824 pf_change_ap(saddr,
4825 &pd->hdr.udp->uh_sport, pd->ip_sum,
4826 &pd->hdr.udp->uh_sum, &pd->baddr,
4827 bport, 1, af);
4828 sport = pd->hdr.udp->uh_sport;
4829 #endif
4830 rewrite++;
4831 break;
4832 case IPPROTO_ICMP:
4833 #if INET6
4834 case IPPROTO_ICMPV6:
4835 #endif
4836 /* nothing! */
4837 break;
4838 #ifndef NO_APPLE_EXTENSIONS
4839 case IPPROTO_GRE:
4840 PF_ACPY(&pd->baddr, saddr, af);
4841 ++rewrite;
4842 switch (af) {
4843 #if INET
4844 case AF_INET:
4845 pf_change_a(&saddr->v4.s_addr,
4846 pd->ip_sum,
4847 pd->baddr.v4.s_addr, 0);
4848 break;
4849 #endif /* INET */
4850 #if INET6
4851 case AF_INET6:
4852 PF_ACPY(saddr, &pd->baddr,
4853 AF_INET6);
4854 break;
4855 #endif /* INET6 */
4856 }
4857 break;
4858 case IPPROTO_ESP:
4859 PF_ACPY(&pd->baddr, saddr, af);
4860 switch (af) {
4861 #if INET
4862 case AF_INET:
4863 pf_change_a(&saddr->v4.s_addr,
4864 pd->ip_sum,
4865 pd->baddr.v4.s_addr, 0);
4866 break;
4867 #endif /* INET */
4868 #if INET6
4869 case AF_INET6:
4870 PF_ACPY(saddr, &pd->baddr,
4871 AF_INET6);
4872 break;
4873 #endif /* INET6 */
4874 }
4875 break;
4876 #endif
4877 default:
4878 switch (af) {
4879 case AF_INET:
4880 pf_change_a(&saddr->v4.s_addr,
4881 pd->ip_sum,
4882 pd->baddr.v4.s_addr, 0);
4883 break;
4884 case AF_INET6:
4885 PF_ACPY(saddr, &pd->baddr, af);
4886 break;
4887 }
4888 }
4889 } else {
4890 switch (pd->proto) {
4891 case IPPROTO_TCP:
4892 #ifndef NO_APPLE_EXTENSIONS
4893 pf_change_ap(direction, pd->mp, daddr,
4894 &th->th_dport, pd->ip_sum,
4895 &th->th_sum, &pd->baddr,
4896 bxport.port, 0, af);
4897 dxport.port = th->th_dport;
4898 #else
4899 pf_change_ap(daddr, &th->th_dport,
4900 pd->ip_sum, &th->th_sum,
4901 &pd->baddr, bport, 0, af);
4902 dport = th->th_dport;
4903 #endif
4904 rewrite++;
4905 break;
4906 case IPPROTO_UDP:
4907 #ifndef NO_APPLE_EXTENSIONS
4908 pf_change_ap(direction, pd->mp, daddr,
4909 &pd->hdr.udp->uh_dport, pd->ip_sum,
4910 &pd->hdr.udp->uh_sum, &pd->baddr,
4911 bxport.port, 1, af);
4912 dxport.port = pd->hdr.udp->uh_dport;
4913 #else
4914 pf_change_ap(daddr,
4915 &pd->hdr.udp->uh_dport, pd->ip_sum,
4916 &pd->hdr.udp->uh_sum, &pd->baddr,
4917 bport, 1, af);
4918 dport = pd->hdr.udp->uh_dport;
4919 #endif
4920 rewrite++;
4921 break;
4922 case IPPROTO_ICMP:
4923 #if INET6
4924 case IPPROTO_ICMPV6:
4925 #endif
4926 /* nothing! */
4927 break;
4928 #ifndef NO_APPLE_EXTENSIONS
4929 case IPPROTO_GRE:
4930 if (pd->proto_variant ==
4931 PF_GRE_PPTP_VARIANT)
4932 grev1->call_id = bxport.call_id;
4933 ++rewrite;
4934 switch (af) {
4935 #if INET
4936 case AF_INET:
4937 pf_change_a(&daddr->v4.s_addr,
4938 pd->ip_sum,
4939 pd->baddr.v4.s_addr, 0);
4940 break;
4941 #endif /* INET */
4942 #if INET6
4943 case AF_INET6:
4944 PF_ACPY(daddr, &pd->baddr,
4945 AF_INET6);
4946 break;
4947 #endif /* INET6 */
4948 }
4949 break;
4950 case IPPROTO_ESP:
4951 switch (af) {
4952 #if INET
4953 case AF_INET:
4954 pf_change_a(&daddr->v4.s_addr,
4955 pd->ip_sum,
4956 pd->baddr.v4.s_addr, 0);
4957 break;
4958 #endif /* INET */
4959 #if INET6
4960 case AF_INET6:
4961 PF_ACPY(daddr, &pd->baddr,
4962 AF_INET6);
4963 break;
4964 #endif /* INET6 */
4965 }
4966 break;
4967 #endif
4968 default:
4969 switch (af) {
4970 case AF_INET:
4971 pf_change_a(&daddr->v4.s_addr,
4972 pd->ip_sum,
4973 pd->baddr.v4.s_addr, 0);
4974 break;
4975 #if INET6
4976 case AF_INET6:
4977 PF_ACPY(daddr, &pd->baddr, af);
4978 break;
4979 #endif /* INET6 */
4980 }
4981 }
4982 }
4983 }
4984 if (pd->proto == IPPROTO_TCP &&
4985 ((r->rule_flag & PFRULE_RETURNRST) ||
4986 (r->rule_flag & PFRULE_RETURN)) &&
4987 !(th->th_flags & TH_RST)) {
4988 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
4989 int len = 0;
4990 struct ip *h4;
4991 #if INET6
4992 struct ip6_hdr *h6;
4993 #endif /* INET6 */
4994
4995 switch (af) {
4996 case AF_INET:
4997 h4 = mtod(m, struct ip *);
4998 len = ntohs(h4->ip_len) - off;
4999 break;
5000 #if INET6
5001 case AF_INET6:
5002 h6 = mtod(m, struct ip6_hdr *);
5003 len = ntohs(h6->ip6_plen) -
5004 (off - sizeof (*h6));
5005 break;
5006 #endif /* INET6 */
5007 }
5008
5009 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
5010 REASON_SET(&reason, PFRES_PROTCKSUM);
5011 else {
5012 if (th->th_flags & TH_SYN)
5013 ack++;
5014 if (th->th_flags & TH_FIN)
5015 ack++;
5016 pf_send_tcp(r, af, pd->dst,
5017 pd->src, th->th_dport, th->th_sport,
5018 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
5019 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
5020 }
5021 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
5022 #ifndef NO_APPLE_EXTENSIONS
5023 pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
5024 #endif
5025 r->return_icmp)
5026 pf_send_icmp(m, r->return_icmp >> 8,
5027 r->return_icmp & 255, af, r);
5028 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
5029 #ifndef NO_APPLE_EXTENSIONS
5030 pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
5031 #endif
5032 r->return_icmp6)
5033 pf_send_icmp(m, r->return_icmp6 >> 8,
5034 r->return_icmp6 & 255, af, r);
5035 }
5036
5037 if (r->action == PF_DROP)
5038 return (PF_DROP);
5039
5040 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
5041 REASON_SET(&reason, PFRES_MEMORY);
5042 return (PF_DROP);
5043 }
5044
5045 if (!state_icmp && (r->keep_state || nr != NULL ||
5046 (pd->flags & PFDESC_TCP_NORM))) {
5047 /* create new state */
5048 struct pf_state *s = NULL;
5049 struct pf_state_key *sk = NULL;
5050 struct pf_src_node *sn = NULL;
5051 #ifndef NO_APPLE_EXTENSIONS
5052 struct pf_ike_hdr ike;
5053
5054 if (pd->proto == IPPROTO_UDP) {
5055 struct udphdr *uh = pd->hdr.udp;
5056 size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
5057
5058 if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
5059 ntohs(uh->uh_dport) == PF_IKE_PORT &&
5060 plen >= PF_IKE_PACKET_MINSIZE) {
5061 if (plen > PF_IKE_PACKET_MINSIZE)
5062 plen = PF_IKE_PACKET_MINSIZE;
5063 m_copydata(m, off + sizeof (*uh), plen, &ike);
5064 }
5065 }
5066
5067 if (nr != NULL && pd->proto == IPPROTO_ESP &&
5068 direction == PF_OUT) {
5069 struct pf_state_key_cmp sk0;
5070 struct pf_state *s0;
5071
5072 /*
5073 * <jhw@apple.com>
5074 * This squelches state creation if the external
5075 * address matches an existing incomplete state with a
5076 * different internal address. Only one 'blocking'
5077 * partial state is allowed for each external address.
5078 */
5079 memset(&sk0, 0, sizeof (sk0));
5080 sk0.af = pd->af;
5081 sk0.proto = IPPROTO_ESP;
5082 PF_ACPY(&sk0.gwy.addr, saddr, sk0.af);
5083 PF_ACPY(&sk0.ext.addr, daddr, sk0.af);
5084 s0 = pf_find_state(kif, &sk0, PF_IN);
5085
5086 if (s0 && PF_ANEQ(&s0->state_key->lan.addr,
5087 pd->src, pd->af)) {
5088 nsn = 0;
5089 goto cleanup;
5090 }
5091 }
5092 #endif
5093
5094 /* check maximums */
5095 if (r->max_states && (r->states >= r->max_states)) {
5096 pf_status.lcounters[LCNT_STATES]++;
5097 REASON_SET(&reason, PFRES_MAXSTATES);
5098 goto cleanup;
5099 }
5100 /* src node for filter rule */
5101 if ((r->rule_flag & PFRULE_SRCTRACK ||
5102 r->rpool.opts & PF_POOL_STICKYADDR) &&
5103 pf_insert_src_node(&sn, r, saddr, af) != 0) {
5104 REASON_SET(&reason, PFRES_SRCLIMIT);
5105 goto cleanup;
5106 }
5107 /* src node for translation rule */
5108 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
5109 ((direction == PF_OUT &&
5110 #ifndef NO_APPLE_EXTENSIONS
5111 nr->action != PF_RDR &&
5112 #endif
5113 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
5114 (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
5115 REASON_SET(&reason, PFRES_SRCLIMIT);
5116 goto cleanup;
5117 }
5118 s = pool_get(&pf_state_pl, PR_WAITOK);
5119 if (s == NULL) {
5120 REASON_SET(&reason, PFRES_MEMORY);
5121 cleanup:
5122 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
5123 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
5124 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
5125 pf_status.src_nodes--;
5126 pool_put(&pf_src_tree_pl, sn);
5127 }
5128 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
5129 nsn->expire == 0) {
5130 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
5131 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
5132 pf_status.src_nodes--;
5133 pool_put(&pf_src_tree_pl, nsn);
5134 }
5135 if (sk != NULL) {
5136 #ifndef NO_APPLE_EXTENSIONS
5137 if (sk->app_state)
5138 pool_put(&pf_app_state_pl,
5139 sk->app_state);
5140 #endif
5141 pool_put(&pf_state_key_pl, sk);
5142 }
5143 return (PF_DROP);
5144 }
5145 bzero(s, sizeof (*s));
5146 #ifndef NO_APPLE_EXTENSIONS
5147 TAILQ_INIT(&s->unlink_hooks);
5148 #endif
5149 s->rule.ptr = r;
5150 s->nat_rule.ptr = nr;
5151 if (nr && nr->action == PF_RDR && direction == PF_OUT)
5152 s->anchor.ptr = a;
5153 STATE_INC_COUNTERS(s);
5154 s->allow_opts = r->allow_opts;
5155 s->log = r->log & PF_LOG_ALL;
5156 if (nr != NULL)
5157 s->log |= nr->log & PF_LOG_ALL;
5158 switch (pd->proto) {
5159 case IPPROTO_TCP:
5160 s->src.seqlo = ntohl(th->th_seq);
5161 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
5162 if ((th->th_flags & (TH_SYN|TH_ACK)) ==
5163 TH_SYN && r->keep_state == PF_STATE_MODULATE) {
5164 /* Generate sequence number modulator */
5165 if ((s->src.seqdiff = pf_tcp_iss(pd) -
5166 s->src.seqlo) == 0)
5167 s->src.seqdiff = 1;
5168 pf_change_a(&th->th_seq, &th->th_sum,
5169 htonl(s->src.seqlo + s->src.seqdiff), 0);
5170 rewrite = off + sizeof (*th);
5171 } else
5172 s->src.seqdiff = 0;
5173 if (th->th_flags & TH_SYN) {
5174 s->src.seqhi++;
5175 s->src.wscale = pf_get_wscale(m, off,
5176 th->th_off, af);
5177 }
5178 s->src.max_win = MAX(ntohs(th->th_win), 1);
5179 if (s->src.wscale & PF_WSCALE_MASK) {
5180 /* Remove scale factor from initial window */
5181 int win = s->src.max_win;
5182 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
5183 s->src.max_win = (win - 1) >>
5184 (s->src.wscale & PF_WSCALE_MASK);
5185 }
5186 if (th->th_flags & TH_FIN)
5187 s->src.seqhi++;
5188 s->dst.seqhi = 1;
5189 s->dst.max_win = 1;
5190 s->src.state = TCPS_SYN_SENT;
5191 s->dst.state = TCPS_CLOSED;
5192 s->timeout = PFTM_TCP_FIRST_PACKET;
5193 break;
5194 case IPPROTO_UDP:
5195 s->src.state = PFUDPS_SINGLE;
5196 s->dst.state = PFUDPS_NO_TRAFFIC;
5197 s->timeout = PFTM_UDP_FIRST_PACKET;
5198 break;
5199 case IPPROTO_ICMP:
5200 #if INET6
5201 case IPPROTO_ICMPV6:
5202 #endif
5203 s->timeout = PFTM_ICMP_FIRST_PACKET;
5204 break;
5205 #ifndef NO_APPLE_EXTENSIONS
5206 case IPPROTO_GRE:
5207 s->src.state = PFGRE1S_INITIATING;
5208 s->dst.state = PFGRE1S_NO_TRAFFIC;
5209 s->timeout = PFTM_GREv1_INITIATING;
5210 break;
5211 case IPPROTO_ESP:
5212 s->src.state = PFESPS_INITIATING;
5213 s->dst.state = PFESPS_NO_TRAFFIC;
5214 s->timeout = PFTM_ESP_FIRST_PACKET;
5215 break;
5216 #endif
5217 default:
5218 s->src.state = PFOTHERS_SINGLE;
5219 s->dst.state = PFOTHERS_NO_TRAFFIC;
5220 s->timeout = PFTM_OTHER_FIRST_PACKET;
5221 }
5222
5223 s->creation = pf_time_second();
5224 s->expire = pf_time_second();
5225
5226 if (sn != NULL) {
5227 s->src_node = sn;
5228 s->src_node->states++;
5229 VERIFY(s->src_node->states != 0);
5230 }
5231 if (nsn != NULL) {
5232 PF_ACPY(&nsn->raddr, &pd->naddr, af);
5233 s->nat_src_node = nsn;
5234 s->nat_src_node->states++;
5235 VERIFY(s->nat_src_node->states != 0);
5236 }
5237 if (pd->proto == IPPROTO_TCP) {
5238 if ((pd->flags & PFDESC_TCP_NORM) &&
5239 pf_normalize_tcp_init(m, off, pd, th, &s->src,
5240 &s->dst)) {
5241 REASON_SET(&reason, PFRES_MEMORY);
5242 pf_src_tree_remove_state(s);
5243 STATE_DEC_COUNTERS(s);
5244 pool_put(&pf_state_pl, s);
5245 return (PF_DROP);
5246 }
5247 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
5248 pf_normalize_tcp_stateful(m, off, pd, &reason,
5249 th, s, &s->src, &s->dst, &rewrite)) {
5250 /* This really shouldn't happen!!! */
5251 DPFPRINTF(PF_DEBUG_URGENT,
5252 ("pf_normalize_tcp_stateful failed on "
5253 "first pkt"));
5254 pf_normalize_tcp_cleanup(s);
5255 pf_src_tree_remove_state(s);
5256 STATE_DEC_COUNTERS(s);
5257 pool_put(&pf_state_pl, s);
5258 return (PF_DROP);
5259 }
5260 }
5261
5262 if ((sk = pf_alloc_state_key(s)) == NULL) {
5263 REASON_SET(&reason, PFRES_MEMORY);
5264 goto cleanup;
5265 }
5266
5267 sk->proto = pd->proto;
5268 sk->direction = direction;
5269 sk->af = af;
5270 #ifndef NO_APPLE_EXTENSIONS
5271 if (pd->proto == IPPROTO_UDP) {
5272 if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
5273 ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
5274 sk->proto_variant = PF_EXTFILTER_APD;
5275 } else {
5276 sk->proto_variant = nr ? nr->extfilter :
5277 r->extfilter;
5278 if (sk->proto_variant < PF_EXTFILTER_APD)
5279 sk->proto_variant = PF_EXTFILTER_APD;
5280 }
5281 } else if (pd->proto == IPPROTO_GRE) {
5282 sk->proto_variant = pd->proto_variant;
5283 }
5284 #endif
5285 if (direction == PF_OUT) {
5286 PF_ACPY(&sk->gwy.addr, saddr, af);
5287 PF_ACPY(&sk->ext.addr, daddr, af);
5288 switch (pd->proto) {
5289 #ifndef NO_APPLE_EXTENSIONS
5290 case IPPROTO_UDP:
5291 sk->gwy.xport = sxport;
5292 sk->ext.xport = dxport;
5293 break;
5294 case IPPROTO_ESP:
5295 sk->gwy.xport.spi = 0;
5296 sk->ext.xport.spi = pd->hdr.esp->spi;
5297 break;
5298 #endif
5299 case IPPROTO_ICMP:
5300 #if INET6
5301 case IPPROTO_ICMPV6:
5302 #endif
5303 #ifndef NO_APPLE_EXTENSIONS
5304 sk->gwy.xport.port = nxport.port;
5305 sk->ext.xport.spi = 0;
5306 #else
5307 sk->gwy.port = nport;
5308 sk->ext.port = 0;
5309 #endif
5310 break;
5311 default:
5312 #ifndef NO_APPLE_EXTENSIONS
5313 sk->gwy.xport = sxport;
5314 sk->ext.xport = dxport;
5315 break;
5316 #else
5317 sk->gwy.port = sport;
5318 sk->ext.port = dport;
5319 #endif
5320 }
5321 #ifndef NO_APPLE_EXTENSIONS
5322 if (nr != NULL) {
5323 PF_ACPY(&sk->lan.addr, &pd->baddr, af);
5324 sk->lan.xport = bxport;
5325 } else {
5326 PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
5327 sk->lan.xport = sk->gwy.xport;
5328 }
5329 #else
5330 if (nr != NULL) {
5331 PF_ACPY(&sk->lan.addr, &pd->baddr, af);
5332 sk->lan.port = bport;
5333 } else {
5334 PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
5335 sk->lan.port = sk->gwy.port;
5336 }
5337 #endif
5338 } else {
5339 PF_ACPY(&sk->lan.addr, daddr, af);
5340 PF_ACPY(&sk->ext.addr, saddr, af);
5341 switch (pd->proto) {
5342 case IPPROTO_ICMP:
5343 #if INET6
5344 case IPPROTO_ICMPV6:
5345 #endif
5346 #ifndef NO_APPLE_EXTENSIONS
5347 sk->lan.xport = nxport;
5348 sk->ext.xport.spi = 0;
5349 #else
5350 sk->lan.port = nport;
5351 sk->ext.port = 0;
5352 #endif
5353 break;
5354 #ifndef NO_APPLE_EXTENSIONS
5355 case IPPROTO_ESP:
5356 sk->ext.xport.spi = 0;
5357 sk->lan.xport.spi = pd->hdr.esp->spi;
5358 break;
5359 default:
5360 sk->lan.xport = dxport;
5361 sk->ext.xport = sxport;
5362 break;
5363 #else
5364 default:
5365 sk->lan.port = dport;
5366 sk->ext.port = sport;
5367 #endif
5368 }
5369 #ifndef NO_APPLE_EXTENSIONS
5370 if (nr != NULL) {
5371 PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
5372 sk->gwy.xport = bxport;
5373 } else {
5374 PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
5375 sk->gwy.xport = sk->lan.xport;
5376 }
5377 }
5378 #else
5379 if (nr != NULL) {
5380 PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
5381 sk->gwy.port = bport;
5382 } else {
5383 PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
5384 sk->gwy.port = sk->lan.port;
5385 }
5386 }
5387 #endif
5388
5389 pf_set_rt_ifp(s, saddr); /* needs s->state_key set */
5390
5391 #ifndef NO_APPLE_EXTENSIONS
5392 m = pd->mp;
5393
5394 if (sk->app_state == 0) {
5395 switch (pd->proto) {
5396 case IPPROTO_TCP: {
5397 u_int16_t dport = (direction == PF_OUT) ?
5398 sk->ext.xport.port : sk->gwy.xport.port;
5399
5400 if (nr != NULL &&
5401 ntohs(dport) == PF_PPTP_PORT) {
5402 struct pf_app_state *as;
5403
5404 as = pool_get(&pf_app_state_pl,
5405 PR_WAITOK);
5406 if (!as) {
5407 REASON_SET(&reason,
5408 PFRES_MEMORY);
5409 goto cleanup;
5410 }
5411
5412 bzero(as, sizeof (*as));
5413 as->handler = pf_pptp_handler;
5414 as->compare_lan_ext = 0;
5415 as->compare_ext_gwy = 0;
5416 as->u.pptp.grev1_state = 0;
5417 sk->app_state = as;
5418 (void) hook_establish(&s->unlink_hooks,
5419 0, (hook_fn_t) pf_pptp_unlink, s);
5420 }
5421 break;
5422 }
5423
5424 case IPPROTO_UDP: {
5425 struct udphdr *uh = pd->hdr.udp;
5426
5427 if (nr != NULL &&
5428 ntohs(uh->uh_sport) == PF_IKE_PORT &&
5429 ntohs(uh->uh_dport) == PF_IKE_PORT) {
5430 struct pf_app_state *as;
5431
5432 as = pool_get(&pf_app_state_pl,
5433 PR_WAITOK);
5434 if (!as) {
5435 REASON_SET(&reason,
5436 PFRES_MEMORY);
5437 goto cleanup;
5438 }
5439
5440 bzero(as, sizeof (*as));
5441 as->compare_lan_ext = pf_ike_compare;
5442 as->compare_ext_gwy = pf_ike_compare;
5443 as->u.ike.cookie = ike.initiator_cookie;
5444 sk->app_state = as;
5445 }
5446 break;
5447 }
5448
5449 default:
5450 break;
5451 }
5452 }
5453 #endif
5454
5455 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
5456 if (pd->proto == IPPROTO_TCP)
5457 pf_normalize_tcp_cleanup(s);
5458 REASON_SET(&reason, PFRES_STATEINS);
5459 pf_src_tree_remove_state(s);
5460 STATE_DEC_COUNTERS(s);
5461 pool_put(&pf_state_pl, s);
5462 return (PF_DROP);
5463 } else
5464 *sm = s;
5465 if (tag > 0) {
5466 pf_tag_ref(tag);
5467 s->tag = tag;
5468 }
5469 if (pd->proto == IPPROTO_TCP &&
5470 (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
5471 r->keep_state == PF_STATE_SYNPROXY) {
5472 s->src.state = PF_TCPS_PROXY_SRC;
5473 if (nr != NULL) {
5474 #ifndef NO_APPLE_EXTENSIONS
5475 if (direction == PF_OUT) {
5476 pf_change_ap(direction, pd->mp, saddr,
5477 &th->th_sport, pd->ip_sum,
5478 &th->th_sum, &pd->baddr,
5479 bxport.port, 0, af);
5480 sxport.port = th->th_sport;
5481 } else {
5482 pf_change_ap(direction, pd->mp, daddr,
5483 &th->th_dport, pd->ip_sum,
5484 &th->th_sum, &pd->baddr,
5485 bxport.port, 0, af);
5486 sxport.port = th->th_dport;
5487 }
5488 #else
5489 if (direction == PF_OUT) {
5490 pf_change_ap(saddr, &th->th_sport,
5491 pd->ip_sum, &th->th_sum, &pd->baddr,
5492 bport, 0, af);
5493 sport = th->th_sport;
5494 } else {
5495 pf_change_ap(daddr, &th->th_dport,
5496 pd->ip_sum, &th->th_sum, &pd->baddr,
5497 bport, 0, af);
5498 sport = th->th_dport;
5499 }
5500 #endif
5501 }
5502 s->src.seqhi = htonl(random());
5503 /* Find mss option */
5504 mss = pf_get_mss(m, off, th->th_off, af);
5505 mss = pf_calc_mss(saddr, af, mss);
5506 mss = pf_calc_mss(daddr, af, mss);
5507 s->src.mss = mss;
5508 pf_send_tcp(r, af, daddr, saddr, th->th_dport,
5509 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
5510 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
5511 REASON_SET(&reason, PFRES_SYNPROXY);
5512 return (PF_SYNPROXY_DROP);
5513 }
5514
5515 #ifndef NO_APPLE_EXTENSIONS
5516 if (sk->app_state && sk->app_state->handler) {
5517 int offx = off;
5518
5519 switch (pd->proto) {
5520 case IPPROTO_TCP:
5521 offx += th->th_off << 2;
5522 break;
5523 case IPPROTO_UDP:
5524 offx += pd->hdr.udp->uh_ulen << 2;
5525 break;
5526 default:
5527 /* ALG handlers only apply to TCP and UDP rules */
5528 break;
5529 }
5530
5531 if (offx > off) {
5532 sk->app_state->handler(s, direction, offx,
5533 pd, kif);
5534 if (pd->lmw < 0) {
5535 REASON_SET(&reason, PFRES_MEMORY);
5536 return (PF_DROP);
5537 }
5538 m = pd->mp;
5539 }
5540 }
5541 #endif
5542 }
5543
5544 /* copy back packet headers if we performed NAT operations */
5545 #ifndef NO_APPLE_EXTENSIONS
5546 if (rewrite) {
5547 if (rewrite < off + hdrlen)
5548 rewrite = off + hdrlen;
5549
5550 m = pf_lazy_makewritable(pd, pd->mp, rewrite);
5551 if (!m) {
5552 REASON_SET(&reason, PFRES_MEMORY);
5553 return (PF_DROP);
5554 }
5555
5556 m_copyback(m, off, hdrlen, pd->hdr.any);
5557 }
5558 #else
5559 if (rewrite)
5560 m_copyback(m, off, hdrlen, pd->hdr.any);
5561 #endif
5562
5563 return (PF_PASS);
5564 }
5565
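/*
 * pf_test_fragment() is the reduced rule walk for non-first fragments
 * that could not be reassembled.  Port, TCP flag and ICMP type/code
 * criteria cannot be evaluated without a transport header, so rules that
 * depend on them are treated as non-matching, and no state is created for
 * the fragment.
 */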
5566 static int
5567 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
5568 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
5569 struct pf_ruleset **rsm)
5570 {
5571 #pragma unused(h)
5572 struct pf_rule *r, *a = NULL;
5573 struct pf_ruleset *ruleset = NULL;
5574 sa_family_t af = pd->af;
5575 u_short reason;
5576 int tag = -1;
5577 int asd = 0;
5578 int match = 0;
5579
5580 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
5581 while (r != NULL) {
5582 r->evaluations++;
5583 if (pfi_kif_match(r->kif, kif) == r->ifnot)
5584 r = r->skip[PF_SKIP_IFP].ptr;
5585 else if (r->direction && r->direction != direction)
5586 r = r->skip[PF_SKIP_DIR].ptr;
5587 else if (r->af && r->af != af)
5588 r = r->skip[PF_SKIP_AF].ptr;
5589 else if (r->proto && r->proto != pd->proto)
5590 r = r->skip[PF_SKIP_PROTO].ptr;
5591 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
5592 r->src.neg, kif))
5593 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
5594 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
5595 r->dst.neg, NULL))
5596 r = r->skip[PF_SKIP_DST_ADDR].ptr;
5597 else if (r->tos && !(r->tos == pd->tos))
5598 r = TAILQ_NEXT(r, entries);
5599 else if (r->os_fingerprint != PF_OSFP_ANY)
5600 r = TAILQ_NEXT(r, entries);
5601 #ifndef NO_APPLE_EXTENSIONS
5602 else if (pd->proto == IPPROTO_UDP &&
5603 (r->src.xport.range.op || r->dst.xport.range.op))
5604 r = TAILQ_NEXT(r, entries);
5605 else if (pd->proto == IPPROTO_TCP &&
5606 (r->src.xport.range.op || r->dst.xport.range.op ||
5607 r->flagset))
5608 r = TAILQ_NEXT(r, entries);
5609 #else
5610 else if (pd->proto == IPPROTO_UDP &&
5611 (r->src.port_op || r->dst.port_op))
5612 r = TAILQ_NEXT(r, entries);
5613 else if (pd->proto == IPPROTO_TCP &&
5614 (r->src.port_op || r->dst.port_op || r->flagset))
5615 r = TAILQ_NEXT(r, entries);
5616 #endif
5617 else if ((pd->proto == IPPROTO_ICMP ||
5618 pd->proto == IPPROTO_ICMPV6) &&
5619 (r->type || r->code))
5620 r = TAILQ_NEXT(r, entries);
5621 else if (r->prob && r->prob <= (random() % (UINT_MAX - 1) + 1))
5622 r = TAILQ_NEXT(r, entries);
5623 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
5624 r = TAILQ_NEXT(r, entries);
5625 else {
5626 if (r->anchor == NULL) {
5627 match = 1;
5628 *rm = r;
5629 *am = a;
5630 *rsm = ruleset;
5631 if ((*rm)->quick)
5632 break;
5633 r = TAILQ_NEXT(r, entries);
5634 } else
5635 pf_step_into_anchor(&asd, &ruleset,
5636 PF_RULESET_FILTER, &r, &a, &match);
5637 }
5638 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
5639 PF_RULESET_FILTER, &r, &a, &match))
5640 break;
5641 }
5642 r = *rm;
5643 a = *am;
5644 ruleset = *rsm;
5645
5646 REASON_SET(&reason, PFRES_MATCH);
5647
5648 if (r->log)
5649 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
5650 pd);
5651
5652 if (r->action != PF_PASS)
5653 return (PF_DROP);
5654
5655 if (pf_tag_packet(m, pd->pf_mtag, tag, -1)) {
5656 REASON_SET(&reason, PFRES_MEMORY);
5657 return (PF_DROP);
5658 }
5659
5660 return (PF_PASS);
5661 }
5662
5663 #ifndef NO_APPLE_EXTENSIONS
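/*
 * pf_pptp_handler() is the PPTP ALG attached to states for the PPTP
 * control connection (TCP, PF_PPTP_PORT).  It snoops the control
 * messages, records the PNS and PAC call IDs, and pre-creates a GREv1
 * state keyed on them so the data channel can pass.  When the control
 * connection is NATed it also rewrites ("spoofs") the call ID carried in
 * the payload, choosing a replacement that does not collide with any
 * existing GREv1 state and fixing up the TCP checksum accordingly.
 */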
5664 static void
5665 pf_pptp_handler(struct pf_state *s, int direction, int off,
5666 struct pf_pdesc *pd, struct pfi_kif *kif)
5667 {
5668 #pragma unused(direction)
5669 struct tcphdr *th;
5670 struct pf_pptp_state *as;
5671 struct pf_pptp_ctrl_msg cm;
5672 size_t plen;
5673 struct pf_state *gs;
5674 u_int16_t ct;
5675 u_int16_t *pac_call_id;
5676 u_int16_t *pns_call_id;
5677 u_int16_t *spoof_call_id;
5678 u_int8_t *pac_state;
5679 u_int8_t *pns_state;
5680 enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
5681 struct mbuf *m;
5682 struct pf_state_key *sk;
5683 struct pf_state_key *gsk;
5684
5685 m = pd->mp;
5686 plen = min(sizeof (cm), m->m_pkthdr.len - off);
5687 if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
5688 return;
5689
5690 as = &s->state_key->app_state->u.pptp;
5691 m_copydata(m, off, plen, &cm);
5692
5693 if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
5694 return;
5695 if (ntohs(cm.hdr.type) != 1)
5696 return;
5697
5698 sk = s->state_key;
5699 gs = as->grev1_state;
5700 if (!gs) {
5701 gs = pool_get(&pf_state_pl, PR_WAITOK);
5702 if (!gs)
5703 return;
5704
5705 memcpy(gs, s, sizeof (*gs));
5706
5707 memset(&gs->entry_id, 0, sizeof (gs->entry_id));
5708 memset(&gs->entry_list, 0, sizeof (gs->entry_list));
5709
5710 TAILQ_INIT(&gs->unlink_hooks);
5711 gs->rt_kif = NULL;
5712 gs->creation = 0;
5713 gs->pfsync_time = 0;
5714 gs->packets[0] = gs->packets[1] = 0;
5715 gs->bytes[0] = gs->bytes[1] = 0;
5716 gs->timeout = PFTM_UNLINKED;
5717 gs->id = gs->creatorid = 0;
5718 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
5719 gs->src.scrub = gs->dst.scrub = 0;
5720
5721 gsk = pf_alloc_state_key(gs);
5722 if (!gsk) {
5723 pool_put(&pf_state_pl, gs);
5724 return;
5725 }
5726
5727 memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan));
5728 memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy));
5729 memcpy(&gsk->ext, &sk->ext, sizeof (gsk->ext));
5730 gsk->af = sk->af;
5731 gsk->proto = IPPROTO_GRE;
5732 gsk->proto_variant = PF_GRE_PPTP_VARIANT;
5733 gsk->app_state = 0;
5734 gsk->lan.xport.call_id = 0;
5735 gsk->gwy.xport.call_id = 0;
5736 gsk->ext.xport.call_id = 0;
5737
5738 STATE_INC_COUNTERS(gs);
5739 as->grev1_state = gs;
5740 } else {
5741 gsk = gs->state_key;
5742 }
5743
5744 switch (sk->direction) {
5745 case PF_IN:
5746 pns_call_id = &gsk->ext.xport.call_id;
5747 pns_state = &gs->dst.state;
5748 pac_call_id = &gsk->lan.xport.call_id;
5749 pac_state = &gs->src.state;
5750 break;
5751
5752 case PF_OUT:
5753 pns_call_id = &gsk->lan.xport.call_id;
5754 pns_state = &gs->src.state;
5755 pac_call_id = &gsk->ext.xport.call_id;
5756 pac_state = &gs->dst.state;
5757 break;
5758
5759 default:
5760 DPFPRINTF(PF_DEBUG_URGENT,
5761 ("pf_pptp_handler: bad directional!\n"));
5762 return;
5763 }
5764
5765 spoof_call_id = 0;
5766 op = PF_PPTP_PASS;
5767
5768 ct = ntohs(cm.ctrl.type);
5769
5770 switch (ct) {
5771 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ:
5772 *pns_call_id = cm.msg.call_out_req.call_id;
5773 *pns_state = PFGRE1S_INITIATING;
5774 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5775 spoof_call_id = &cm.msg.call_out_req.call_id;
5776 break;
5777
5778 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY:
5779 *pac_call_id = cm.msg.call_out_rpy.call_id;
5780 if (s->nat_rule.ptr)
5781 spoof_call_id =
5782 (pac_call_id == &gsk->lan.xport.call_id) ?
5783 &cm.msg.call_out_rpy.call_id :
5784 &cm.msg.call_out_rpy.peer_call_id;
5785 if (gs->timeout == PFTM_UNLINKED) {
5786 *pac_state = PFGRE1S_INITIATING;
5787 op = PF_PPTP_INSERT_GRE;
5788 }
5789 break;
5790
5791 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST:
5792 *pns_call_id = cm.msg.call_in_1st.call_id;
5793 *pns_state = PFGRE1S_INITIATING;
5794 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5795 spoof_call_id = &cm.msg.call_in_1st.call_id;
5796 break;
5797
5798 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND:
5799 *pac_call_id = cm.msg.call_in_2nd.call_id;
5800 *pac_state = PFGRE1S_INITIATING;
5801 if (s->nat_rule.ptr)
5802 spoof_call_id =
5803 (pac_call_id == &gsk->lan.xport.call_id) ?
5804 &cm.msg.call_in_2nd.call_id :
5805 &cm.msg.call_in_2nd.peer_call_id;
5806 break;
5807
5808 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD:
5809 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5810 spoof_call_id = &cm.msg.call_in_3rd.call_id;
5811 if (cm.msg.call_in_3rd.call_id != *pns_call_id) {
5812 break;
5813 }
5814 if (gs->timeout == PFTM_UNLINKED)
5815 op = PF_PPTP_INSERT_GRE;
5816 break;
5817
5818 case PF_PPTP_CTRL_TYPE_CALL_CLR:
5819 if (cm.msg.call_clr.call_id != *pns_call_id)
5820 op = PF_PPTP_REMOVE_GRE;
5821 break;
5822
5823 case PF_PPTP_CTRL_TYPE_CALL_DISC:
5824 if (cm.msg.call_clr.call_id != *pac_call_id)
5825 op = PF_PPTP_REMOVE_GRE;
5826 break;
5827
5828 case PF_PPTP_CTRL_TYPE_ERROR:
5829 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5830 spoof_call_id = &cm.msg.error.peer_call_id;
5831 break;
5832
5833 case PF_PPTP_CTRL_TYPE_SET_LINKINFO:
5834 if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id)
5835 spoof_call_id = &cm.msg.set_linkinfo.peer_call_id;
5836 break;
5837
5838 default:
5839 op = PF_PPTP_PASS;
5840 break;
5841 }
5842
5843 if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
5844 gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
5845 if (spoof_call_id) {
5846 u_int16_t call_id = 0;
5847 int n = 0;
5848 struct pf_state_key_cmp key;
5849
5850 key.af = gsk->af;
5851 key.proto = IPPROTO_GRE;
5852 key.proto_variant = PF_GRE_PPTP_VARIANT;
5853 PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af);
5854 PF_ACPY(&key.ext.addr, &gsk->ext.addr, key.af);
5855 key.gwy.xport.call_id = gsk->gwy.xport.call_id;
5856 key.ext.xport.call_id = gsk->ext.xport.call_id;
5857 do {
5858 call_id = htonl(random());
5859 } while (!call_id);
5860
5861 while (pf_find_state_all(&key, PF_IN, 0)) {
5862 call_id = ntohs(call_id);
5863 --call_id;
5864 if (--call_id == 0) call_id = 0xffff;
5865 call_id = htons(call_id);
5866
5867 key.gwy.xport.call_id = call_id;
5868
5869 if (++n > 65535) {
5870 DPFPRINTF(PF_DEBUG_URGENT,
5871 ("pf_pptp_handler: failed to spoof "
5872 "call id\n"));
5873 key.gwy.xport.call_id = 0;
5874 break;
5875 }
5876 }
5877
5878 gsk->gwy.xport.call_id = call_id;
5879 }
5880 }
5881
5882 th = pd->hdr.tcp;
5883
5884 if (spoof_call_id && gsk->lan.xport.call_id != gsk->gwy.xport.call_id) {
5885 if (*spoof_call_id == gsk->gwy.xport.call_id) {
5886 *spoof_call_id = gsk->lan.xport.call_id;
5887 th->th_sum = pf_cksum_fixup(th->th_sum,
5888 gsk->gwy.xport.call_id, gsk->lan.xport.call_id, 0);
5889 } else {
5890 *spoof_call_id = gsk->gwy.xport.call_id;
5891 th->th_sum = pf_cksum_fixup(th->th_sum,
5892 gsk->lan.xport.call_id, gsk->gwy.xport.call_id, 0);
5893 }
5894
5895 m = pf_lazy_makewritable(pd, m, off + plen);
5896 if (!m) {
5897 as->grev1_state = NULL;
5898 STATE_DEC_COUNTERS(gs);
5899 pool_put(&pf_state_pl, gs);
5900 return;
5901 }
5902 m_copyback(m, off, plen, &cm);
5903 }
5904
5905 switch (op) {
5906 case PF_PPTP_REMOVE_GRE:
5907 gs->timeout = PFTM_PURGE;
5908 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
5909 gsk->lan.xport.call_id = 0;
5910 gsk->gwy.xport.call_id = 0;
5911 gsk->ext.xport.call_id = 0;
5912 gs->id = gs->creatorid = 0;
5913 break;
5914
5915 case PF_PPTP_INSERT_GRE:
5916 gs->creation = pf_time_second();
5917 gs->expire = pf_time_second();
5918 gs->timeout = PFTM_GREv1_FIRST_PACKET;
5919 if (gs->src_node != NULL) {
5920 ++gs->src_node->states;
5921 VERIFY(gs->src_node->states != 0);
5922 }
5923 if (gs->nat_src_node != NULL) {
5924 ++gs->nat_src_node->states;
5925 VERIFY(gs->nat_src_node->states != 0);
5926 }
5927 pf_set_rt_ifp(gs, &sk->lan.addr);
5928 if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) {
5929
5930 /*
5931 * <jhw@apple.com>
5932 * FIX ME: insertion can fail when multiple PNS
5933 * behind the same NAT open calls to the same PAC
5934 * simultaneously because spoofed call ID numbers
5935 * are chosen before states are inserted. This is
5936 * hard to fix and happens infrequently enough that
5937 * users will normally try again and this ALG will
5938 * succeed. Failures are expected to be rare enough
5939 * that fixing this is a low priority.
5940 */
5941 as->grev1_state = NULL;
5942 pd->lmw = -1;
5943 pf_src_tree_remove_state(gs);
5944 STATE_DEC_COUNTERS(gs);
5945 pool_put(&pf_state_pl, gs);
5946 DPFPRINTF(PF_DEBUG_URGENT, ("pf_pptp_handler: error "
5947 "inserting GREv1 state.\n"));
5948 }
5949 break;
5950
5951 default:
5952 break;
5953 }
5954 }
5955
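/*
 * pf_pptp_unlink() is registered on the control state's unlink_hooks so
 * that tearing down the PPTP control state also expires the companion
 * GREv1 state.  pf_ike_compare() orders IKE app states by initiator
 * cookie; it is presumably consulted by the state-key comparison routines
 * so that concurrent IKE (PF_IKE_PORT) exchanges between the same pair of
 * endpoints map to distinct states.
 */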
5956 static void
5957 pf_pptp_unlink(struct pf_state *s)
5958 {
5959 struct pf_app_state *as = s->state_key->app_state;
5960 struct pf_state *gs = as->u.pptp.grev1_state;
5961
5962 if (gs) {
5963 if (gs->timeout < PFTM_MAX)
5964 gs->timeout = PFTM_PURGE;
5965 as->u.pptp.grev1_state = 0;
5966 }
5967 }
5968
5969 static int
5970 pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)
5971 {
5972 int64_t d = a->u.ike.cookie - b->u.ike.cookie;
5973 return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
5974 }
5975 #endif
5976
5977 static int
5978 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
5979 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
5980 u_short *reason)
5981 {
5982 #pragma unused(h)
5983 struct pf_state_key_cmp key;
5984 struct tcphdr *th = pd->hdr.tcp;
5985 u_int16_t win = ntohs(th->th_win);
5986 u_int32_t ack, end, seq, orig_seq;
5987 u_int8_t sws, dws;
5988 int ackskew;
5989 int copyback = 0;
5990 struct pf_state_peer *src, *dst;
5991
5992 #ifndef NO_APPLE_EXTENSIONS
5993 key.app_state = 0;
5994 #endif
5995 key.af = pd->af;
5996 key.proto = IPPROTO_TCP;
5997 if (direction == PF_IN) {
5998 PF_ACPY(&key.ext.addr, pd->src, key.af);
5999 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
6000 #ifndef NO_APPLE_EXTENSIONS
6001 key.ext.xport.port = th->th_sport;
6002 key.gwy.xport.port = th->th_dport;
6003 #else
6004 key.ext.port = th->th_sport;
6005 key.gwy.port = th->th_dport;
6006 #endif
6007 } else {
6008 PF_ACPY(&key.lan.addr, pd->src, key.af);
6009 PF_ACPY(&key.ext.addr, pd->dst, key.af);
6010 #ifndef NO_APPLE_EXTENSIONS
6011 key.lan.xport.port = th->th_sport;
6012 key.ext.xport.port = th->th_dport;
6013 #else
6014 key.lan.port = th->th_sport;
6015 key.ext.port = th->th_dport;
6016 #endif
6017 }
6018
6019 STATE_LOOKUP();
6020
6021 if (direction == (*state)->state_key->direction) {
6022 src = &(*state)->src;
6023 dst = &(*state)->dst;
6024 } else {
6025 src = &(*state)->dst;
6026 dst = &(*state)->src;
6027 }
6028
6029 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
6030 if (direction != (*state)->state_key->direction) {
6031 REASON_SET(reason, PFRES_SYNPROXY);
6032 return (PF_SYNPROXY_DROP);
6033 }
6034 if (th->th_flags & TH_SYN) {
6035 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
6036 REASON_SET(reason, PFRES_SYNPROXY);
6037 return (PF_DROP);
6038 }
6039 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
6040 pd->src, th->th_dport, th->th_sport,
6041 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
6042 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
6043 0, NULL, NULL);
6044 REASON_SET(reason, PFRES_SYNPROXY);
6045 return (PF_SYNPROXY_DROP);
6046 } else if (!(th->th_flags & TH_ACK) ||
6047 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
6048 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
6049 REASON_SET(reason, PFRES_SYNPROXY);
6050 return (PF_DROP);
6051 } else if ((*state)->src_node != NULL &&
6052 pf_src_connlimit(state)) {
6053 REASON_SET(reason, PFRES_SRCLIMIT);
6054 return (PF_DROP);
6055 } else
6056 (*state)->src.state = PF_TCPS_PROXY_DST;
6057 }
6058 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
6059 struct pf_state_host *psrc, *pdst;
6060
6061 if (direction == PF_OUT) {
6062 psrc = &(*state)->state_key->gwy;
6063 pdst = &(*state)->state_key->ext;
6064 } else {
6065 psrc = &(*state)->state_key->ext;
6066 pdst = &(*state)->state_key->lan;
6067 }
6068 if (direction == (*state)->state_key->direction) {
6069 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
6070 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
6071 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
6072 REASON_SET(reason, PFRES_SYNPROXY);
6073 return (PF_DROP);
6074 }
6075 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
6076 if ((*state)->dst.seqhi == 1)
6077 (*state)->dst.seqhi = htonl(random());
6078 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
6079 #ifndef NO_APPLE_EXTENSIONS
6080 &pdst->addr, psrc->xport.port, pdst->xport.port,
6081 #else
6082 &pdst->addr, psrc->port, pdst->port,
6083 #endif
6084 (*state)->dst.seqhi, 0, TH_SYN, 0,
6085 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
6086 REASON_SET(reason, PFRES_SYNPROXY);
6087 return (PF_SYNPROXY_DROP);
6088 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
6089 (TH_SYN|TH_ACK)) ||
6090 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
6091 REASON_SET(reason, PFRES_SYNPROXY);
6092 return (PF_DROP);
6093 } else {
6094 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
6095 (*state)->dst.seqlo = ntohl(th->th_seq);
6096 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
6097 pd->src, th->th_dport, th->th_sport,
6098 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
6099 TH_ACK, (*state)->src.max_win, 0, 0, 0,
6100 (*state)->tag, NULL, NULL);
6101 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
6102 #ifndef NO_APPLE_EXTENSIONS
6103 &pdst->addr, psrc->xport.port, pdst->xport.port,
6104 #else
6105 &pdst->addr, psrc->port, pdst->port,
6106 #endif
6107 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
6108 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
6109 0, NULL, NULL);
6110 (*state)->src.seqdiff = (*state)->dst.seqhi -
6111 (*state)->src.seqlo;
6112 (*state)->dst.seqdiff = (*state)->src.seqhi -
6113 (*state)->dst.seqlo;
6114 (*state)->src.seqhi = (*state)->src.seqlo +
6115 (*state)->dst.max_win;
6116 (*state)->dst.seqhi = (*state)->dst.seqlo +
6117 (*state)->src.max_win;
6118 (*state)->src.wscale = (*state)->dst.wscale = 0;
6119 (*state)->src.state = (*state)->dst.state =
6120 TCPS_ESTABLISHED;
6121 REASON_SET(reason, PFRES_SYNPROXY);
6122 return (PF_SYNPROXY_DROP);
6123 }
6124 }
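/*
 * Editorial sketch (not part of the original source): the synproxy
 * handling above effectively splices two handshakes together.  pf
 * first answers the client's SYN itself (PF_TCPS_PROXY_SRC), then,
 * once the client's ACK is seen, opens its own SYN towards the real
 * server (PF_TCPS_PROXY_DST).  When the server's SYN|ACK arrives,
 * both sides are ACKed and src/dst seqdiff are set so the two
 * independent sequence spaces line up for the rest of the connection.
 */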
6125
6126 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
6127 dst->state >= TCPS_FIN_WAIT_2 &&
6128 src->state >= TCPS_FIN_WAIT_2) {
6129 if (pf_status.debug >= PF_DEBUG_MISC) {
6130 printf("pf: state reuse ");
6131 pf_print_state(*state);
6132 pf_print_flags(th->th_flags);
6133 printf("\n");
6134 }
6135 /* XXX make sure it's the same direction ?? */
6136 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
6137 pf_unlink_state(*state);
6138 *state = NULL;
6139 return (PF_DROP);
6140 }
6141
6142 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
6143 sws = src->wscale & PF_WSCALE_MASK;
6144 dws = dst->wscale & PF_WSCALE_MASK;
6145 } else
6146 sws = dws = 0;
6147
6148 /*
6149 * Sequence tracking algorithm from Guido van Rooij's paper:
6150 * http://www.madison-gurkha.com/publications/tcp_filtering/
6151 * tcp_filtering.ps
6152 */
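/*
 * Editorial note (inferred from the code below, not authoritative):
 * per peer, seqlo/seqhi bound the sequence space we will accept from
 * that side, max_win is the largest unscaled window it has
 * advertised, seqdiff is the random offset applied when sequence
 * numbers are being modulated, and wscale caches the negotiated
 * window-scale option.
 */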
6153
6154 orig_seq = seq = ntohl(th->th_seq);
6155 if (src->seqlo == 0) {
6156 /* First packet from this end. Set its state */
6157
6158 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
6159 src->scrub == NULL) {
6160 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
6161 REASON_SET(reason, PFRES_MEMORY);
6162 return (PF_DROP);
6163 }
6164 }
6165
6166 /* Deferred generation of sequence number modulator */
6167 if (dst->seqdiff && !src->seqdiff) {
6168 /* use random iss for the TCP server */
6169 while ((src->seqdiff = random() - seq) == 0)
6170 ;
6171 ack = ntohl(th->th_ack) - dst->seqdiff;
6172 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6173 src->seqdiff), 0);
6174 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6175 copyback = off + sizeof (*th);
6176 } else {
6177 ack = ntohl(th->th_ack);
6178 }
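/*
 * Editorial example (hypothetical numbers): if the server's real ISS
 * is 1000 and src->seqdiff is chosen as 5000, the segment leaves with
 * seq 6000 on the wire, and ACKs coming the other way have 5000
 * subtracted again before the window checks below, i.e. roughly
 *
 *   seq_on_wire = seq + src->seqdiff;
 *   ack_checked = ntohl(th->th_ack) - dst->seqdiff;
 */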
6179
6180 end = seq + pd->p_len;
6181 if (th->th_flags & TH_SYN) {
6182 end++;
6183 if (dst->wscale & PF_WSCALE_FLAG) {
6184 src->wscale = pf_get_wscale(m, off, th->th_off,
6185 pd->af);
6186 if (src->wscale & PF_WSCALE_FLAG) {
6187 /*
6188 * Remove scale factor from initial
6189 * window
6190 */
6191 sws = src->wscale & PF_WSCALE_MASK;
6192 win = ((u_int32_t)win + (1 << sws) - 1)
6193 >> sws;
6194 dws = dst->wscale & PF_WSCALE_MASK;
6195 } else {
6196 #ifndef NO_APPLE_MODIFICATION
6197 /*
6198 * <rdar://5786370>
6199 *
6200 * Window scale negotiation has failed,
6201 * therefore we must restore the window
6202 * scale in the state record that we
6203 * optimistically removed in
6204 * pf_test_rule(). Care is required to
6205 * prevent arithmetic overflow from
6206 * zeroing the window when it's
6207 * truncated down to 16-bits. --jhw
6208 */
6209 u_int32_t _win = dst->max_win;
6210 _win <<= dst->wscale & PF_WSCALE_MASK;
6211 dst->max_win = MIN(0xffff, _win);
6212 #else
6213 /* fixup other window */
6214 dst->max_win <<= dst->wscale &
6215 PF_WSCALE_MASK;
6216 #endif
6217 /* in case of a retrans SYN|ACK */
6218 dst->wscale = 0;
6219 }
6220 }
6221 }
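/*
 * Editorial worked example for the window-scale handling above
 * (illustrative numbers only): with th_win = 0xffff and sws = 2 the
 * stored window becomes (0xffff + 3) >> 2 = 0x4000, i.e. max_win is
 * kept unscaled.  If scaling turns out not to be negotiated after
 * all, the optimistic removal is undone by shifting dst->max_win back
 * up and clamping to 0xffff so the 16-bit value cannot wrap to zero
 * (e.g. 0x4000 << 3 = 0x20000 -> 0xffff).
 */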
6222 if (th->th_flags & TH_FIN)
6223 end++;
6224
6225 src->seqlo = seq;
6226 if (src->state < TCPS_SYN_SENT)
6227 src->state = TCPS_SYN_SENT;
6228
6229 /*
6230 * May need to slide the window (seqhi may have been set by
6231 * the crappy stack check or if we picked up the connection
6232 * after establishment)
6233 */
6234 #ifndef NO_APPLE_MODIFICATIONS
6235 if (src->seqhi == 1 ||
6236 SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws),
6237 src->seqhi))
6238 src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws);
6239 #else
6240 if (src->seqhi == 1 ||
6241 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
6242 src->seqhi = end + MAX(1, dst->max_win << dws);
6243 #endif
6244 if (win > src->max_win)
6245 src->max_win = win;
6246
6247 } else {
6248 ack = ntohl(th->th_ack) - dst->seqdiff;
6249 if (src->seqdiff) {
6250 /* Modulate sequence numbers */
6251 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6252 src->seqdiff), 0);
6253 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6254 copyback = off + sizeof (*th);
6255 }
6256 end = seq + pd->p_len;
6257 if (th->th_flags & TH_SYN)
6258 end++;
6259 if (th->th_flags & TH_FIN)
6260 end++;
6261 }
6262
6263 if ((th->th_flags & TH_ACK) == 0) {
6264 /* Let it pass through the ack skew check */
6265 ack = dst->seqlo;
6266 } else if ((ack == 0 &&
6267 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
6268 /* broken tcp stacks do not set ack */
6269 (dst->state < TCPS_SYN_SENT)) {
6270 /*
6271 * Many stacks (ours included) will set the ACK number in an
6272 * FIN|ACK if the SYN times out -- no sequence to ACK.
6273 */
6274 ack = dst->seqlo;
6275 }
6276
6277 if (seq == end) {
6278 /* Ease sequencing restrictions on no data packets */
6279 seq = src->seqlo;
6280 end = seq;
6281 }
6282
6283 ackskew = dst->seqlo - ack;
6284
6285
6286 /*
6287 * Need to demodulate the sequence numbers in any TCP SACK options
6288 * (Selective ACK). We could optionally validate the SACK values
6289 * against the current ACK window, either forwards or backwards, but
6290 * I'm not confident that SACK has been implemented properly
6291 * everywhere. It wouldn't surprise me if several stacks accidentally
6292 * SACK too far backwards of previously ACKed data. There really aren't
6293 * any security implications of bad SACKing unless the target stack
6294 * doesn't validate the option length correctly. Someone trying to
6295 * spoof into a TCP connection won't bother blindly sending SACK
6296 * options anyway.
6297 */
6298 if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
6299 #ifndef NO_APPLE_EXTENSIONS
6300 copyback = pf_modulate_sack(m, off, pd, th, dst);
6301 if (copyback == -1) {
6302 REASON_SET(reason, PFRES_MEMORY);
6303 return (PF_DROP);
6304 }
6305
6306 m = pd->mp;
6307 #else
6308 if (pf_modulate_sack(m, off, pd, th, dst))
6309 copyback = 1;
6310 #endif
6311 }
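/*
 * Editorial note: pf_modulate_sack() is expected to apply the same
 * seqdiff adjustment to each SACK block edge that was applied to
 * th_ack above (e.g. an edge of 7000 with dst->seqdiff = 5000 becomes
 * 2000); a negative return is treated as an mbuf allocation failure.
 */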
6312
6313
6314 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
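/*
 * Editorial sketch of the acceptance test below; the first four
 * conditions correspond to the '1'..'4' failure codes printed in the
 * BAD-state path further down:
 *   1. end     within src->seqhi
 *   2. seq     no more than one window behind src->seqlo
 *   3. ackskew >= -MAXACKWINDOW
 *   4. ackskew <= MAXACKWINDOW << sws
 * For example, with src->seqlo = 1000, dst->max_win = 8192 and
 * dws = 0, a retransmission starting at seq 900 still passes test 2,
 * while a segment 70000 bytes behind does not.
 */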
6315 if (SEQ_GEQ(src->seqhi, end) &&
6316 /* Last octet inside other's window space */
6317 #ifndef NO_APPLE_MODIFICATIONS
6318 SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
6319 #else
6320 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
6321 #endif
6322 /* Retrans: not more than one window back */
6323 (ackskew >= -MAXACKWINDOW) &&
6324 /* Acking not more than one reassembled fragment backwards */
6325 (ackskew <= (MAXACKWINDOW << sws)) &&
6326 /* Acking not more than one window forward */
6327 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
6328 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
6329 (pd->flags & PFDESC_IP_REAS) == 0)) {
6330 /* Require an exact/+1 sequence match on resets when possible */
6331
6332 if (dst->scrub || src->scrub) {
6333 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
6334 *state, src, dst, &copyback))
6335 return (PF_DROP);
6336
6337 #ifndef NO_APPLE_EXTENSIONS
6338 m = pd->mp;
6339 #endif
6340 }
6341
6342 /* update max window */
6343 if (src->max_win < win)
6344 src->max_win = win;
6345 /* synchronize sequencing */
6346 if (SEQ_GT(end, src->seqlo))
6347 src->seqlo = end;
6348 /* slide the window of what the other end can send */
6349 #ifndef NO_APPLE_MODIFICATIONS
6350 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6351 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
6352 #else
6353 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
6354 dst->seqhi = ack + MAX((win << sws), 1);
6355 #endif
6356
6357 /* update states */
6358 if (th->th_flags & TH_SYN)
6359 if (src->state < TCPS_SYN_SENT)
6360 src->state = TCPS_SYN_SENT;
6361 if (th->th_flags & TH_FIN)
6362 if (src->state < TCPS_CLOSING)
6363 src->state = TCPS_CLOSING;
6364 if (th->th_flags & TH_ACK) {
6365 if (dst->state == TCPS_SYN_SENT) {
6366 dst->state = TCPS_ESTABLISHED;
6367 if (src->state == TCPS_ESTABLISHED &&
6368 (*state)->src_node != NULL &&
6369 pf_src_connlimit(state)) {
6370 REASON_SET(reason, PFRES_SRCLIMIT);
6371 return (PF_DROP);
6372 }
6373 } else if (dst->state == TCPS_CLOSING)
6374 dst->state = TCPS_FIN_WAIT_2;
6375 }
6376 if (th->th_flags & TH_RST)
6377 src->state = dst->state = TCPS_TIME_WAIT;
6378
6379 /* update expire time */
6380 (*state)->expire = pf_time_second();
6381 if (src->state >= TCPS_FIN_WAIT_2 &&
6382 dst->state >= TCPS_FIN_WAIT_2)
6383 (*state)->timeout = PFTM_TCP_CLOSED;
6384 else if (src->state >= TCPS_CLOSING &&
6385 dst->state >= TCPS_CLOSING)
6386 (*state)->timeout = PFTM_TCP_FIN_WAIT;
6387 else if (src->state < TCPS_ESTABLISHED ||
6388 dst->state < TCPS_ESTABLISHED)
6389 (*state)->timeout = PFTM_TCP_OPENING;
6390 else if (src->state >= TCPS_CLOSING ||
6391 dst->state >= TCPS_CLOSING)
6392 (*state)->timeout = PFTM_TCP_CLOSING;
6393 else
6394 (*state)->timeout = PFTM_TCP_ESTABLISHED;
6395
6396 /* Fall through to PASS packet */
6397
6398 } else if ((dst->state < TCPS_SYN_SENT ||
6399 dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) &&
6400 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
6401 /* Within a window forward of the originating packet */
6402 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
6403 /* Within a window backward of the originating packet */
6404
6405 /*
6406 * This currently handles three situations:
6407 * 1) Stupid stacks will shotgun SYNs before their peer
6408 * replies.
6409 * 2) When PF catches an already established stream (the
6410 * firewall rebooted, the state table was flushed, routes
6411 * changed...)
6412 * 3) Packets get funky immediately after the connection
6413 * closes (this should catch Solaris spurious ACK|FINs
6414 * that web servers like to spew after a close)
6415 *
6416 * This must be a little more careful than the above code
6417 * since packet floods will also be caught here. We don't
6418 * update the TTL here to mitigate the damage of a packet
6419 * flood and so the same code can handle awkward establishment
6420 * and a loosened connection close.
6421 * In the establishment case, a correct peer response will
6422 * validate the connection, go through the normal state code
6423 * and keep updating the state TTL.
6424 */
6425
6426 if (pf_status.debug >= PF_DEBUG_MISC) {
6427 printf("pf: loose state match: ");
6428 pf_print_state(*state);
6429 pf_print_flags(th->th_flags);
6430 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6431 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
6432 pd->p_len, ackskew, (*state)->packets[0],
6433 (*state)->packets[1],
6434 direction == PF_IN ? "in" : "out",
6435 direction == (*state)->state_key->direction ?
6436 "fwd" : "rev");
6437 }
6438
6439 if (dst->scrub || src->scrub) {
6440 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
6441 *state, src, dst, &copyback))
6442 return (PF_DROP);
6443 #ifndef NO_APPLE_EXTENSIONS
6444 m = pd->mp;
6445 #endif
6446 }
6447
6448 /* update max window */
6449 if (src->max_win < win)
6450 src->max_win = win;
6451 /* synchronize sequencing */
6452 if (SEQ_GT(end, src->seqlo))
6453 src->seqlo = end;
6454 /* slide the window of what the other end can send */
6455 #ifndef NO_APPLE_MODIFICATIONS
6456 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6457 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
6458 #else
6459 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
6460 dst->seqhi = ack + MAX((win << sws), 1);
6461 #endif
6462
6463 /*
6464 * Cannot set dst->seqhi here since this could be a shotgunned
6465 * SYN and not an already established connection.
6466 */
6467
6468 if (th->th_flags & TH_FIN)
6469 if (src->state < TCPS_CLOSING)
6470 src->state = TCPS_CLOSING;
6471 if (th->th_flags & TH_RST)
6472 src->state = dst->state = TCPS_TIME_WAIT;
6473
6474 /* Fall through to PASS packet */
6475
6476 } else {
6477 if ((*state)->dst.state == TCPS_SYN_SENT &&
6478 (*state)->src.state == TCPS_SYN_SENT) {
6479 /* Send RST for state mismatches during handshake */
6480 if (!(th->th_flags & TH_RST))
6481 pf_send_tcp((*state)->rule.ptr, pd->af,
6482 pd->dst, pd->src, th->th_dport,
6483 th->th_sport, ntohl(th->th_ack), 0,
6484 TH_RST, 0, 0,
6485 (*state)->rule.ptr->return_ttl, 1, 0,
6486 pd->eh, kif->pfik_ifp);
6487 src->seqlo = 0;
6488 src->seqhi = 1;
6489 src->max_win = 1;
6490 } else if (pf_status.debug >= PF_DEBUG_MISC) {
6491 printf("pf: BAD state: ");
6492 pf_print_state(*state);
6493 pf_print_flags(th->th_flags);
6494 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6495 "pkts=%llu:%llu dir=%s,%s\n",
6496 seq, orig_seq, ack, pd->p_len, ackskew,
6497 (*state)->packets[0], (*state)->packets[1],
6498 direction == PF_IN ? "in" : "out",
6499 direction == (*state)->state_key->direction ?
6500 "fwd" : "rev");
6501 printf("pf: State failure on: %c %c %c %c | %c %c\n",
6502 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
6503 #ifndef NO_APPLE_MODIFICATIONS
6504 SEQ_GEQ(seq,
6505 src->seqlo - ((u_int32_t)dst->max_win << dws)) ?
6506 #else
6507 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
6508 #endif
6509 ' ': '2',
6510 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
6511 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
6512 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
6513 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
6514 }
6515 REASON_SET(reason, PFRES_BADSTATE);
6516 return (PF_DROP);
6517 }
6518
6519 /* Any packets which have gotten here are to be passed */
6520
6521 #ifndef NO_APPLE_EXTENSIONS
6522 if ((*state)->state_key->app_state &&
6523 (*state)->state_key->app_state->handler) {
6524 (*state)->state_key->app_state->handler(*state, direction,
6525 off + (th->th_off << 2), pd, kif);
6526 if (pd->lmw < 0) {
6527 REASON_SET(reason, PFRES_MEMORY);
6528 return (PF_DROP);
6529 }
6530 m = pd->mp;
6531 }
6532
6533 /* translate source/destination address, if necessary */
6534 if (STATE_TRANSLATE((*state)->state_key)) {
6535 if (direction == PF_OUT)
6536 pf_change_ap(direction, pd->mp, pd->src, &th->th_sport,
6537 pd->ip_sum, &th->th_sum,
6538 &(*state)->state_key->gwy.addr,
6539 (*state)->state_key->gwy.xport.port, 0, pd->af);
6540 else
6541 pf_change_ap(direction, pd->mp, pd->dst, &th->th_dport,
6542 pd->ip_sum, &th->th_sum,
6543 &(*state)->state_key->lan.addr,
6544 (*state)->state_key->lan.xport.port, 0, pd->af);
6545 copyback = off + sizeof (*th);
6546 }
6547
6548 if (copyback) {
6549 m = pf_lazy_makewritable(pd, m, copyback);
6550 if (!m) {
6551 REASON_SET(reason, PFRES_MEMORY);
6552 return (PF_DROP);
6553 }
6554
6555 /* Copyback sequence modulation or stateful scrub changes */
6556 m_copyback(m, off, sizeof (*th), th);
6557 }
6558 #else
6559 /* translate source/destination address, if necessary */
6560 if (STATE_TRANSLATE((*state)->state_key)) {
6561 if (direction == PF_OUT)
6562 pf_change_ap(pd->src, pd->mp, &th->th_sport, pd->ip_sum,
6563 &th->th_sum, &(*state)->state_key->gwy.addr,
6564 (*state)->state_key->gwy.port, 0, pd->af);
6565 else
6566 pf_change_ap(pd->dst, pd->mp, &th->th_dport, pd->ip_sum,
6567 &th->th_sum, &(*state)->state_key->lan.addr,
6568 (*state)->state_key->lan.port, 0, pd->af);
6569 m_copyback(m, off, sizeof (*th), th);
6570 } else if (copyback) {
6571 /* Copyback sequence modulation or stateful scrub changes */
6572 m_copyback(m, off, sizeof (*th), th);
6573 }
6574 #endif
6575
6576 return (PF_PASS);
6577 }
6578
6579 static int
6580 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
6581 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
6582 {
6583 #pragma unused(h)
6584 struct pf_state_peer *src, *dst;
6585 struct pf_state_key_cmp key;
6586 struct udphdr *uh = pd->hdr.udp;
6587 #ifndef NO_APPLE_EXTENSIONS
6588 struct pf_app_state as;
6589 int dx, action, extfilter;
6590 key.app_state = 0;
6591 key.proto_variant = PF_EXTFILTER_APD;
6592 #endif
6593
6594 key.af = pd->af;
6595 key.proto = IPPROTO_UDP;
6596 if (direction == PF_IN) {
6597 PF_ACPY(&key.ext.addr, pd->src, key.af);
6598 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
6599 #ifndef NO_APPLE_EXTENSIONS
6600 key.ext.xport.port = uh->uh_sport;
6601 key.gwy.xport.port = uh->uh_dport;
6602 dx = PF_IN;
6603 #else
6604 key.ext.port = uh->uh_sport;
6605 key.gwy.port = uh->uh_dport;
6606 #endif
6607 } else {
6608 PF_ACPY(&key.lan.addr, pd->src, key.af);
6609 PF_ACPY(&key.ext.addr, pd->dst, key.af);
6610 #ifndef NO_APPLE_EXTENSIONS
6611 key.lan.xport.port = uh->uh_sport;
6612 key.ext.xport.port = uh->uh_dport;
6613 dx = PF_OUT;
6614 #else
6615 key.lan.port = uh->uh_sport;
6616 key.ext.port = uh->uh_dport;
6617 #endif
6618 }
6619
6620 #ifndef NO_APPLE_EXTENSIONS
6621 if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
6622 ntohs(uh->uh_dport) == PF_IKE_PORT) {
6623 struct pf_ike_hdr ike;
6624 size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
6625 if (plen < PF_IKE_PACKET_MINSIZE) {
6626 DPFPRINTF(PF_DEBUG_MISC,
6627 ("pf: IKE message too small.\n"));
6628 return (PF_DROP);
6629 }
6630
6631 if (plen > sizeof (ike))
6632 plen = sizeof (ike);
6633 m_copydata(m, off + sizeof (*uh), plen, &ike);
6634
6635 if (ike.initiator_cookie) {
6636 key.app_state = &as;
6637 as.compare_lan_ext = pf_ike_compare;
6638 as.compare_ext_gwy = pf_ike_compare;
6639 as.u.ike.cookie = ike.initiator_cookie;
6640 } else {
6641 /*
6642 * <http://tools.ietf.org/html/\
6643 * draft-ietf-ipsec-nat-t-ike-01>
6644 * Support non-standard NAT-T implementations that
6645 * push the ESP packet over the top of the IKE packet.
6646 * Do not drop packet.
6647 */
6648 DPFPRINTF(PF_DEBUG_MISC,
6649 ("pf: IKE initiator cookie = 0.\n"));
6650 }
6651 }
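/*
 * Editorial note: for traffic between IKE ports, the 64-bit initiator
 * cookie at the start of the ISAKMP header is used as an extra
 * application-level discriminator, so separate IKE exchanges between
 * the same address/port pair can map to separate states; a zero
 * cookie (the NAT-T case described above) skips the app_state and
 * falls back to plain address/port matching.
 */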
6652
6653 *state = pf_find_state(kif, &key, dx);
6654
6655 if (!key.app_state && *state == 0) {
6656 key.proto_variant = PF_EXTFILTER_AD;
6657 *state = pf_find_state(kif, &key, dx);
6658 }
6659
6660 if (!key.app_state && *state == 0) {
6661 key.proto_variant = PF_EXTFILTER_EI;
6662 *state = pf_find_state(kif, &key, dx);
6663 }
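/*
 * Editorial note (interpretation, not from the original source): the
 * cascade above retries the lookup with progressively looser external
 * matching -- full address+port (PF_EXTFILTER_APD), then
 * address-dependent (PF_EXTFILTER_AD), then endpoint-independent
 * (PF_EXTFILTER_EI) -- which appears to mirror the NAT filtering
 * behaviours described in RFC 4787; an EI-keyed state will match
 * replies from any external address and port.
 */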
6664
6665 if (pf_state_lookup_aux(state, kif, direction, &action))
6666 return (action);
6667 #else
6668 STATE_LOOKUP();
6669 #endif
6670
6671 if (direction == (*state)->state_key->direction) {
6672 src = &(*state)->src;
6673 dst = &(*state)->dst;
6674 } else {
6675 src = &(*state)->dst;
6676 dst = &(*state)->src;
6677 }
6678
6679 /* update states */
6680 if (src->state < PFUDPS_SINGLE)
6681 src->state = PFUDPS_SINGLE;
6682 if (dst->state == PFUDPS_SINGLE)
6683 dst->state = PFUDPS_MULTIPLE;
6684
6685 /* update expire time */
6686 (*state)->expire = pf_time_second();
6687 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
6688 (*state)->timeout = PFTM_UDP_MULTIPLE;
6689 else
6690 (*state)->timeout = PFTM_UDP_SINGLE;
6691
6692 #ifndef NO_APPLE_EXTENSIONS
6693 extfilter = (*state)->state_key->proto_variant;
6694 if (extfilter > PF_EXTFILTER_APD) {
6695 (*state)->state_key->ext.xport.port = key.ext.xport.port;
6696 if (extfilter > PF_EXTFILTER_AD)
6697 PF_ACPY(&(*state)->state_key->ext.addr,
6698 &key.ext.addr, key.af);
6699 }
6700
6701 if ((*state)->state_key->app_state &&
6702 (*state)->state_key->app_state->handler) {
6703 (*state)->state_key->app_state->handler(*state, direction,
6704 off + uh->uh_ulen, pd, kif);
6705 if (pd->lmw < 0) {
6706 REASON_SET(reason, PFRES_MEMORY);
6707 return (PF_DROP);
6708 }
6709 m = pd->mp;
6710 }
6711 #endif
6712
6713 /* translate source/destination address, if necessary */
6714 #ifndef NO_APPLE_EXTENSIONS
6715 if (STATE_TRANSLATE((*state)->state_key)) {
6716 m = pf_lazy_makewritable(pd, m, off + sizeof (*uh));
6717 if (!m)
6718 return (PF_DROP);
6719
6720 if (direction == PF_OUT)
6721 pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
6722 pd->ip_sum, &uh->uh_sum,
6723 &(*state)->state_key->gwy.addr,
6724 (*state)->state_key->gwy.xport.port, 1, pd->af);
6725 else
6726 pf_change_ap(direction, pd->mp, pd->dst, &uh->uh_dport,
6727 pd->ip_sum, &uh->uh_sum,
6728 &(*state)->state_key->lan.addr,
6729 (*state)->state_key->lan.xport.port, 1, pd->af);
6730 m_copyback(m, off, sizeof (*uh), uh);
6731 }
6732 #else
6733 if (STATE_TRANSLATE((*state)->state_key)) {
6734 if (direction == PF_OUT)
6735 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
6736 &uh->uh_sum, &(*state)->state_key->gwy.addr,
6737 (*state)->state_key->gwy.port, 1, pd->af);
6738 else
6739 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
6740 &uh->uh_sum, &(*state)->state_key->lan.addr,
6741 (*state)->state_key->lan.port, 1, pd->af);
6742 m_copyback(m, off, sizeof (*uh), uh);
6743 }
6744 #endif
6745
6746 return (PF_PASS);
6747 }
6748
6749 static int
6750 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
6751 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
6752 {
6753 #pragma unused(h)
6754 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
6755 u_int16_t icmpid = 0, *icmpsum;
6756 u_int8_t icmptype;
6757 int state_icmp = 0;
6758 struct pf_state_key_cmp key;
6759
6760 #ifndef NO_APPLE_EXTENSIONS
6761 struct pf_app_state as;
6762 key.app_state = 0;
6763 #endif
6764
6765 switch (pd->proto) {
6766 #if INET
6767 case IPPROTO_ICMP:
6768 icmptype = pd->hdr.icmp->icmp_type;
6769 icmpid = pd->hdr.icmp->icmp_id;
6770 icmpsum = &pd->hdr.icmp->icmp_cksum;
6771
6772 if (icmptype == ICMP_UNREACH ||
6773 icmptype == ICMP_SOURCEQUENCH ||
6774 icmptype == ICMP_REDIRECT ||
6775 icmptype == ICMP_TIMXCEED ||
6776 icmptype == ICMP_PARAMPROB)
6777 state_icmp++;
6778 break;
6779 #endif /* INET */
6780 #if INET6
6781 case IPPROTO_ICMPV6:
6782 icmptype = pd->hdr.icmp6->icmp6_type;
6783 icmpid = pd->hdr.icmp6->icmp6_id;
6784 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
6785
6786 if (icmptype == ICMP6_DST_UNREACH ||
6787 icmptype == ICMP6_PACKET_TOO_BIG ||
6788 icmptype == ICMP6_TIME_EXCEEDED ||
6789 icmptype == ICMP6_PARAM_PROB)
6790 state_icmp++;
6791 break;
6792 #endif /* INET6 */
6793 }
6794
6795 if (!state_icmp) {
6796
6797 /*
6798 * ICMP query/reply message not related to a TCP/UDP packet.
6799 * Search for an ICMP state.
6800 */
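/*
 * Editorial note: for ICMP queries the identifier field plays the
 * role a port number plays for TCP/UDP, so it is stored in the
 * xport.port slot of the key below and rewritten by NAT the same way
 * a source port would be.
 */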
6801 key.af = pd->af;
6802 key.proto = pd->proto;
6803 if (direction == PF_IN) {
6804 PF_ACPY(&key.ext.addr, pd->src, key.af);
6805 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
6806 #ifndef NO_APPLE_EXTENSIONS
6807 key.ext.xport.port = 0;
6808 key.gwy.xport.port = icmpid;
6809 #else
6810 key.ext.port = 0;
6811 key.gwy.port = icmpid;
6812 #endif
6813 } else {
6814 PF_ACPY(&key.lan.addr, pd->src, key.af);
6815 PF_ACPY(&key.ext.addr, pd->dst, key.af);
6816 #ifndef NO_APPLE_EXTENSIONS
6817 key.lan.xport.port = icmpid;
6818 key.ext.xport.port = 0;
6819 #else
6820 key.lan.port = icmpid;
6821 key.ext.port = 0;
6822 #endif
6823 }
6824
6825 STATE_LOOKUP();
6826
6827 (*state)->expire = pf_time_second();
6828 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
6829
6830 /* translate source/destination address, if necessary */
6831 if (STATE_TRANSLATE((*state)->state_key)) {
6832 if (direction == PF_OUT) {
6833 switch (pd->af) {
6834 #if INET
6835 case AF_INET:
6836 pf_change_a(&saddr->v4.s_addr,
6837 pd->ip_sum,
6838 (*state)->state_key->gwy.addr.v4.s_addr, 0);
6839 #ifndef NO_APPLE_EXTENSIONS
6840 pd->hdr.icmp->icmp_cksum =
6841 pf_cksum_fixup(
6842 pd->hdr.icmp->icmp_cksum, icmpid,
6843 (*state)->state_key->gwy.xport.port, 0);
6844 pd->hdr.icmp->icmp_id =
6845 (*state)->state_key->gwy.xport.port;
6846 m = pf_lazy_makewritable(pd, m,
6847 off + ICMP_MINLEN);
6848 if (!m)
6849 return (PF_DROP);
6850 #else
6851 pd->hdr.icmp->icmp_cksum =
6852 pf_cksum_fixup(
6853 pd->hdr.icmp->icmp_cksum, icmpid,
6854 (*state)->state_key->gwy.port, 0);
6855 pd->hdr.icmp->icmp_id =
6856 (*state)->state_key->gwy.port;
6857 #endif
6858 m_copyback(m, off, ICMP_MINLEN,
6859 pd->hdr.icmp);
6860 break;
6861 #endif /* INET */
6862 #if INET6
6863 case AF_INET6:
6864 pf_change_a6(saddr,
6865 &pd->hdr.icmp6->icmp6_cksum,
6866 &(*state)->state_key->gwy.addr, 0);
6867 #ifndef NO_APPLE_EXTENSIONS
6868 m = pf_lazy_makewritable(pd, m,
6869 off + sizeof (struct icmp6_hdr));
6870 if (!m)
6871 return (PF_DROP);
6872 #endif
6873 m_copyback(m, off,
6874 sizeof (struct icmp6_hdr),
6875 pd->hdr.icmp6);
6876 break;
6877 #endif /* INET6 */
6878 }
6879 } else {
6880 switch (pd->af) {
6881 #if INET
6882 case AF_INET:
6883 pf_change_a(&daddr->v4.s_addr,
6884 pd->ip_sum,
6885 (*state)->state_key->lan.addr.v4.s_addr, 0);
6886 #ifndef NO_APPLE_EXTENSIONS
6887 pd->hdr.icmp->icmp_cksum =
6888 pf_cksum_fixup(
6889 pd->hdr.icmp->icmp_cksum, icmpid,
6890 (*state)->state_key->lan.xport.port, 0);
6891 pd->hdr.icmp->icmp_id =
6892 (*state)->state_key->lan.xport.port;
6893 m = pf_lazy_makewritable(pd, m,
6894 off + ICMP_MINLEN);
6895 if (!m)
6896 return (PF_DROP);
6897 #else
6898 pd->hdr.icmp->icmp_cksum =
6899 pf_cksum_fixup(
6900 pd->hdr.icmp->icmp_cksum, icmpid,
6901 (*state)->state_key->lan.port, 0);
6902 pd->hdr.icmp->icmp_id =
6903 (*state)->state_key->lan.port;
6904 #endif
6905 m_copyback(m, off, ICMP_MINLEN,
6906 pd->hdr.icmp);
6907 break;
6908 #endif /* INET */
6909 #if INET6
6910 case AF_INET6:
6911 pf_change_a6(daddr,
6912 &pd->hdr.icmp6->icmp6_cksum,
6913 &(*state)->state_key->lan.addr, 0);
6914 #ifndef NO_APPLE_EXTENSIONS
6915 m = pf_lazy_makewritable(pd, m,
6916 off + sizeof (struct icmp6_hdr));
6917 if (!m)
6918 return (PF_DROP);
6919 #endif
6920 m_copyback(m, off,
6921 sizeof (struct icmp6_hdr),
6922 pd->hdr.icmp6);
6923 break;
6924 #endif /* INET6 */
6925 }
6926 }
6927 }
6928
6929 return (PF_PASS);
6930
6931 } else {
6932 /*
6933 * ICMP error message in response to a TCP/UDP packet.
6934 * Extract the inner TCP/UDP header and search for that state.
6935 */
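/*
 * Editorial note: the quoted packet inside the ICMP error travelled
 * in the opposite direction from the error itself, which is why the
 * state keys built below use pd2.dst where the normal lookup path
 * uses pd->src, and vice versa.
 */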
6936
6937 struct pf_pdesc pd2;
6938 #if INET
6939 struct ip h2;
6940 #endif /* INET */
6941 #if INET6
6942 struct ip6_hdr h2_6;
6943 int terminal = 0;
6944 #endif /* INET6 */
6945 int ipoff2 = 0;
6946 int off2 = 0;
6947
6948 memset(&pd2, 0, sizeof (pd2));
6949
6950 pd2.af = pd->af;
6951 switch (pd->af) {
6952 #if INET
6953 case AF_INET:
6954 /* offset of h2 in mbuf chain */
6955 ipoff2 = off + ICMP_MINLEN;
6956
6957 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof (h2),
6958 NULL, reason, pd2.af)) {
6959 DPFPRINTF(PF_DEBUG_MISC,
6960 ("pf: ICMP error message too short "
6961 "(ip)\n"));
6962 return (PF_DROP);
6963 }
6964 /*
6965 * ICMP error messages don't refer to non-first
6966 * fragments
6967 */
6968 if (h2.ip_off & htons(IP_OFFMASK)) {
6969 REASON_SET(reason, PFRES_FRAG);
6970 return (PF_DROP);
6971 }
6972
6973 /* offset of protocol header that follows h2 */
6974 off2 = ipoff2 + (h2.ip_hl << 2);
6975
6976 pd2.proto = h2.ip_p;
6977 pd2.src = (struct pf_addr *)&h2.ip_src;
6978 pd2.dst = (struct pf_addr *)&h2.ip_dst;
6979 pd2.ip_sum = &h2.ip_sum;
6980 break;
6981 #endif /* INET */
6982 #if INET6
6983 case AF_INET6:
6984 ipoff2 = off + sizeof (struct icmp6_hdr);
6985
6986 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof (h2_6),
6987 NULL, reason, pd2.af)) {
6988 DPFPRINTF(PF_DEBUG_MISC,
6989 ("pf: ICMP error message too short "
6990 "(ip6)\n"));
6991 return (PF_DROP);
6992 }
6993 pd2.proto = h2_6.ip6_nxt;
6994 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
6995 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
6996 pd2.ip_sum = NULL;
6997 off2 = ipoff2 + sizeof (h2_6);
6998 do {
6999 switch (pd2.proto) {
7000 case IPPROTO_FRAGMENT:
7001 /*
7002 * ICMPv6 error messages for
7003 * non-first fragments
7004 */
7005 REASON_SET(reason, PFRES_FRAG);
7006 return (PF_DROP);
7007 case IPPROTO_AH:
7008 case IPPROTO_HOPOPTS:
7009 case IPPROTO_ROUTING:
7010 case IPPROTO_DSTOPTS: {
7011 /* get next header and header length */
7012 struct ip6_ext opt6;
7013
7014 if (!pf_pull_hdr(m, off2, &opt6,
7015 sizeof (opt6), NULL, reason,
7016 pd2.af)) {
7017 DPFPRINTF(PF_DEBUG_MISC,
7018 ("pf: ICMPv6 short opt\n"));
7019 return (PF_DROP);
7020 }
7021 if (pd2.proto == IPPROTO_AH)
7022 off2 += (opt6.ip6e_len + 2) * 4;
7023 else
7024 off2 += (opt6.ip6e_len + 1) * 8;
7025 pd2.proto = opt6.ip6e_nxt;
7026 /* go to the next header */
7027 break;
7028 }
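/*
 * Editorial note: ip6e_len counts 8-octet units excluding the first
 * 8 octets for ordinary extension headers, hence (len + 1) * 8; AH
 * instead expresses its length in 32-bit words minus 2, hence
 * (len + 2) * 4.
 */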
7029 default:
7030 terminal++;
7031 break;
7032 }
7033 } while (!terminal);
7034 break;
7035 #endif /* INET6 */
7036 }
7037
7038 switch (pd2.proto) {
7039 case IPPROTO_TCP: {
7040 struct tcphdr th;
7041 u_int32_t seq;
7042 struct pf_state_peer *src, *dst;
7043 u_int8_t dws;
7044 int copyback = 0;
7045
7046 /*
7047 * Only the first 8 bytes of the TCP header can be
7048 * expected. Don't access any TCP header fields after
7049 * th_seq, an ackskew test is not possible.
7050 */
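/*
 * Editorial note: an ICMP error is only required to quote the IP
 * header plus the first 8 bytes of the offending datagram, which for
 * TCP covers exactly th_sport, th_dport and th_seq -- hence the
 * 8-byte pull and the sequence-only check here.
 */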
7051 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
7052 pd2.af)) {
7053 DPFPRINTF(PF_DEBUG_MISC,
7054 ("pf: ICMP error message too short "
7055 "(tcp)\n"));
7056 return (PF_DROP);
7057 }
7058
7059 key.af = pd2.af;
7060 key.proto = IPPROTO_TCP;
7061 if (direction == PF_IN) {
7062 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7063 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7064 #ifndef NO_APPLE_EXTENSIONS
7065 key.ext.xport.port = th.th_dport;
7066 key.gwy.xport.port = th.th_sport;
7067 #else
7068 key.ext.port = th.th_dport;
7069 key.gwy.port = th.th_sport;
7070 #endif
7071 } else {
7072 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7073 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7074 #ifndef NO_APPLE_EXTENSIONS
7075 key.lan.xport.port = th.th_dport;
7076 key.ext.xport.port = th.th_sport;
7077 #else
7078 key.lan.port = th.th_dport;
7079 key.ext.port = th.th_sport;
7080 #endif
7081 }
7082
7083 STATE_LOOKUP();
7084
7085 if (direction == (*state)->state_key->direction) {
7086 src = &(*state)->dst;
7087 dst = &(*state)->src;
7088 } else {
7089 src = &(*state)->src;
7090 dst = &(*state)->dst;
7091 }
7092
7093 if (src->wscale && dst->wscale)
7094 dws = dst->wscale & PF_WSCALE_MASK;
7095 else
7096 dws = 0;
7097
7098 /* Demodulate sequence number */
7099 seq = ntohl(th.th_seq) - src->seqdiff;
7100 if (src->seqdiff) {
7101 pf_change_a(&th.th_seq, icmpsum,
7102 htonl(seq), 0);
7103 copyback = 1;
7104 }
7105
7106 if (!SEQ_GEQ(src->seqhi, seq) ||
7107 #ifndef NO_APPLE_MODIFICATION
7108 !SEQ_GEQ(seq,
7109 src->seqlo - ((u_int32_t)dst->max_win << dws))) {
7110 #else
7111 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))) {
7112 #endif
7113 if (pf_status.debug >= PF_DEBUG_MISC) {
7114 printf("pf: BAD ICMP %d:%d ",
7115 icmptype, pd->hdr.icmp->icmp_code);
7116 pf_print_host(pd->src, 0, pd->af);
7117 printf(" -> ");
7118 pf_print_host(pd->dst, 0, pd->af);
7119 printf(" state: ");
7120 pf_print_state(*state);
7121 printf(" seq=%u\n", seq);
7122 }
7123 REASON_SET(reason, PFRES_BADSTATE);
7124 return (PF_DROP);
7125 }
7126
7127 if (STATE_TRANSLATE((*state)->state_key)) {
7128 if (direction == PF_IN) {
7129 pf_change_icmp(pd2.src, &th.th_sport,
7130 daddr, &(*state)->state_key->lan.addr,
7131 #ifndef NO_APPLE_EXTENSIONS
7132 (*state)->state_key->lan.xport.port, NULL,
7133 #else
7134 (*state)->state_key->lan.port, NULL,
7135 #endif
7136 pd2.ip_sum, icmpsum,
7137 pd->ip_sum, 0, pd2.af);
7138 } else {
7139 pf_change_icmp(pd2.dst, &th.th_dport,
7140 saddr, &(*state)->state_key->gwy.addr,
7141 #ifndef NO_APPLE_EXTENSIONS
7142 (*state)->state_key->gwy.xport.port, NULL,
7143 #else
7144 (*state)->state_key->gwy.port, NULL,
7145 #endif
7146 pd2.ip_sum, icmpsum,
7147 pd->ip_sum, 0, pd2.af);
7148 }
7149 copyback = 1;
7150 }
7151
7152 if (copyback) {
7153 #ifndef NO_APPLE_EXTENSIONS
7154 m = pf_lazy_makewritable(pd, m, off2 + 8);
7155 if (!m)
7156 return (PF_DROP);
7157 #endif
7158 switch (pd2.af) {
7159 #if INET
7160 case AF_INET:
7161 m_copyback(m, off, ICMP_MINLEN,
7162 pd->hdr.icmp);
7163 m_copyback(m, ipoff2, sizeof (h2),
7164 &h2);
7165 break;
7166 #endif /* INET */
7167 #if INET6
7168 case AF_INET6:
7169 m_copyback(m, off,
7170 sizeof (struct icmp6_hdr),
7171 pd->hdr.icmp6);
7172 m_copyback(m, ipoff2, sizeof (h2_6),
7173 &h2_6);
7174 break;
7175 #endif /* INET6 */
7176 }
7177 m_copyback(m, off2, 8, &th);
7178 }
7179
7180 return (PF_PASS);
7181 break;
7182 }
7183 case IPPROTO_UDP: {
7184 struct udphdr uh;
7185 #ifndef NO_APPLE_EXTENSIONS
7186 int dx, action;
7187 #endif
7188 if (!pf_pull_hdr(m, off2, &uh, sizeof (uh),
7189 NULL, reason, pd2.af)) {
7190 DPFPRINTF(PF_DEBUG_MISC,
7191 ("pf: ICMP error message too short "
7192 "(udp)\n"));
7193 return (PF_DROP);
7194 }
7195
7196 key.af = pd2.af;
7197 key.proto = IPPROTO_UDP;
7198 if (direction == PF_IN) {
7199 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7200 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7201 #ifndef NO_APPLE_EXTENSIONS
7202 key.ext.xport.port = uh.uh_dport;
7203 key.gwy.xport.port = uh.uh_sport;
7204 dx = PF_IN;
7205 #else
7206 key.ext.port = uh.uh_dport;
7207 key.gwy.port = uh.uh_sport;
7208 #endif
7209 } else {
7210 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7211 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7212 #ifndef NO_APPLE_EXTENSIONS
7213 key.lan.xport.port = uh.uh_dport;
7214 key.ext.xport.port = uh.uh_sport;
7215 dx = PF_OUT;
7216 #else
7217 key.lan.port = uh.uh_dport;
7218 key.ext.port = uh.uh_sport;
7219 #endif
7220 }
7221
7222 #ifndef NO_APPLE_EXTENSIONS
7223 key.proto_variant = PF_EXTFILTER_APD;
7224
7225 if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
7226 ntohs(uh.uh_dport) == PF_IKE_PORT) {
7227 struct pf_ike_hdr ike;
7228 size_t plen =
7229 m->m_pkthdr.len - off2 - sizeof (uh);
7230 if (direction == PF_IN &&
7231 plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
7232 DPFPRINTF(PF_DEBUG_MISC, ("pf: "
7233 "ICMP error, embedded IKE message "
7234 "too small.\n"));
7235 return (PF_DROP);
7236 }
7237
7238 if (plen > sizeof (ike))
7239 plen = sizeof (ike);
7240 m_copydata(m, off + sizeof (uh), plen, &ike);
7241
7242 key.app_state = &as;
7243 as.compare_lan_ext = pf_ike_compare;
7244 as.compare_ext_gwy = pf_ike_compare;
7245 as.u.ike.cookie = ike.initiator_cookie;
7246 }
7247
7248 *state = pf_find_state(kif, &key, dx);
7249
7250 if (key.app_state && *state == 0) {
7251 key.app_state = 0;
7252 *state = pf_find_state(kif, &key, dx);
7253 }
7254
7255 if (*state == 0) {
7256 key.proto_variant = PF_EXTFILTER_AD;
7257 *state = pf_find_state(kif, &key, dx);
7258 }
7259
7260 if (*state == 0) {
7261 key.proto_variant = PF_EXTFILTER_EI;
7262 *state = pf_find_state(kif, &key, dx);
7263 }
7264
7265 if (pf_state_lookup_aux(state, kif, direction, &action))
7266 return (action);
7267 #else
7268 STATE_LOOKUP();
7269 #endif
7270
7271 if (STATE_TRANSLATE((*state)->state_key)) {
7272 if (direction == PF_IN) {
7273 pf_change_icmp(pd2.src, &uh.uh_sport,
7274 daddr, &(*state)->state_key->lan.addr,
7275 #ifndef NO_APPLE_EXTENSIONS
7276 (*state)->state_key->lan.xport.port, &uh.uh_sum,
7277 #else
7278 (*state)->state_key->lan.port, &uh.uh_sum,
7279 #endif
7280 pd2.ip_sum, icmpsum,
7281 pd->ip_sum, 1, pd2.af);
7282 } else {
7283 pf_change_icmp(pd2.dst, &uh.uh_dport,
7284 saddr, &(*state)->state_key->gwy.addr,
7285 #ifndef NO_APPLE_EXTENSIONS
7286 (*state)->state_key->gwy.xport.port, &uh.uh_sum,
7287 #else
7288 (*state)->state_key->gwy.port, &uh.uh_sum,
7289 #endif
7290 pd2.ip_sum, icmpsum,
7291 pd->ip_sum, 1, pd2.af);
7292 }
7293 #ifndef NO_APPLE_EXTENSIONS
7294 m = pf_lazy_makewritable(pd, m,
7295 off2 + sizeof (uh));
7296 if (!m)
7297 return (PF_DROP);
7298 #endif
7299 switch (pd2.af) {
7300 #if INET
7301 case AF_INET:
7302 m_copyback(m, off, ICMP_MINLEN,
7303 pd->hdr.icmp);
7304 m_copyback(m, ipoff2, sizeof (h2), &h2);
7305 break;
7306 #endif /* INET */
7307 #if INET6
7308 case AF_INET6:
7309 m_copyback(m, off,
7310 sizeof (struct icmp6_hdr),
7311 pd->hdr.icmp6);
7312 m_copyback(m, ipoff2, sizeof (h2_6),
7313 &h2_6);
7314 break;
7315 #endif /* INET6 */
7316 }
7317 m_copyback(m, off2, sizeof (uh), &uh);
7318 }
7319
7320 return (PF_PASS);
7321 break;
7322 }
7323 #if INET
7324 case IPPROTO_ICMP: {
7325 struct icmp iih;
7326
7327 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
7328 NULL, reason, pd2.af)) {
7329 DPFPRINTF(PF_DEBUG_MISC,
7330 ("pf: ICMP error message too short i"
7331 "(icmp)\n"));
7332 return (PF_DROP);
7333 }
7334
7335 key.af = pd2.af;
7336 key.proto = IPPROTO_ICMP;
7337 if (direction == PF_IN) {
7338 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7339 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7340 #ifndef NO_APPLE_EXTENSIONS
7341 key.ext.xport.port = 0;
7342 key.gwy.xport.port = iih.icmp_id;
7343 #else
7344 key.ext.port = 0;
7345 key.gwy.port = iih.icmp_id;
7346 #endif
7347 } else {
7348 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7349 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7350 #ifndef NO_APPLE_EXTENSIONS
7351 key.lan.xport.port = iih.icmp_id;
7352 key.ext.xport.port = 0;
7353 #else
7354 key.lan.port = iih.icmp_id;
7355 key.ext.port = 0;
7356 #endif
7357 }
7358
7359 STATE_LOOKUP();
7360
7361 if (STATE_TRANSLATE((*state)->state_key)) {
7362 if (direction == PF_IN) {
7363 pf_change_icmp(pd2.src, &iih.icmp_id,
7364 daddr, &(*state)->state_key->lan.addr,
7365 #ifndef NO_APPLE_EXTENSIONS
7366 (*state)->state_key->lan.xport.port, NULL,
7367 #else
7368 (*state)->state_key->lan.port, NULL,
7369 #endif
7370 pd2.ip_sum, icmpsum,
7371 pd->ip_sum, 0, AF_INET);
7372 } else {
7373 pf_change_icmp(pd2.dst, &iih.icmp_id,
7374 saddr, &(*state)->state_key->gwy.addr,
7375 #ifndef NO_APPLE_EXTENSIONS
7376 (*state)->state_key->gwy.xport.port, NULL,
7377 #else
7378 (*state)->state_key->gwy.port, NULL,
7379 #endif
7380 pd2.ip_sum, icmpsum,
7381 pd->ip_sum, 0, AF_INET);
7382 }
7383 #ifndef NO_APPLE_EXTENSIONS
7384 m = pf_lazy_makewritable(pd, m, off2 + ICMP_MINLEN);
7385 if (!m)
7386 return (PF_DROP);
7387 #endif
7388 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
7389 m_copyback(m, ipoff2, sizeof (h2), &h2);
7390 m_copyback(m, off2, ICMP_MINLEN, &iih);
7391 }
7392
7393 return (PF_PASS);
7394 break;
7395 }
7396 #endif /* INET */
7397 #if INET6
7398 case IPPROTO_ICMPV6: {
7399 struct icmp6_hdr iih;
7400
7401 if (!pf_pull_hdr(m, off2, &iih,
7402 sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
7403 DPFPRINTF(PF_DEBUG_MISC,
7404 ("pf: ICMP error message too short "
7405 "(icmp6)\n"));
7406 return (PF_DROP);
7407 }
7408
7409 key.af = pd2.af;
7410 key.proto = IPPROTO_ICMPV6;
7411 if (direction == PF_IN) {
7412 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7413 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7414 #ifndef NO_APPLE_EXTENSIONS
7415 key.ext.xport.port = 0;
7416 key.gwy.xport.port = iih.icmp6_id;
7417 #else
7418 key.ext.port = 0;
7419 key.gwy.port = iih.icmp6_id;
7420 #endif
7421 } else {
7422 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7423 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7424 #ifndef NO_APPLE_EXTENSIONS
7425 key.lan.xport.port = iih.icmp6_id;
7426 key.ext.xport.port = 0;
7427 #else
7428 key.lan.port = iih.icmp6_id;
7429 key.ext.port = 0;
7430 #endif
7431 }
7432
7433 STATE_LOOKUP();
7434
7435 if (STATE_TRANSLATE((*state)->state_key)) {
7436 if (direction == PF_IN) {
7437 pf_change_icmp(pd2.src, &iih.icmp6_id,
7438 daddr, &(*state)->state_key->lan.addr,
7439 #ifndef NO_APPLE_EXTENSIONS
7440 (*state)->state_key->lan.xport.port, NULL,
7441 #else
7442 (*state)->state_key->lan.port, NULL,
7443 #endif
7444 pd2.ip_sum, icmpsum,
7445 pd->ip_sum, 0, AF_INET6);
7446 } else {
7447 pf_change_icmp(pd2.dst, &iih.icmp6_id,
7448 saddr, &(*state)->state_key->gwy.addr,
7449 #ifndef NO_APPLE_EXTENSIONS
7450 (*state)->state_key->gwy.xport.port, NULL,
7451 #else
7452 (*state)->state_key->gwy.port, NULL,
7453 #endif
7454 pd2.ip_sum, icmpsum,
7455 pd->ip_sum, 0, AF_INET6);
7456 }
7457 #ifndef NO_APPLE_EXTENSIONS
7458 m = pf_lazy_makewritable(pd, m, off2 +
7459 sizeof (struct icmp6_hdr));
7460 if (!m)
7461 return (PF_DROP);
7462 #endif
7463 m_copyback(m, off, sizeof (struct icmp6_hdr),
7464 pd->hdr.icmp6);
7465 m_copyback(m, ipoff2, sizeof (h2_6), &h2_6);
7466 m_copyback(m, off2, sizeof (struct icmp6_hdr),
7467 &iih);
7468 }
7469
7470 return (PF_PASS);
7471 break;
7472 }
7473 #endif /* INET6 */
7474 default: {
7475 key.af = pd2.af;
7476 key.proto = pd2.proto;
7477 if (direction == PF_IN) {
7478 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7479 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7480 #ifndef NO_APPLE_EXTENSIONS
7481 key.ext.xport.port = 0;
7482 key.gwy.xport.port = 0;
7483 #else
7484 key.ext.port = 0;
7485 key.gwy.port = 0;
7486 #endif
7487 } else {
7488 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7489 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7490 #ifndef NO_APPLE_EXTENSIONS
7491 key.lan.xport.port = 0;
7492 key.ext.xport.port = 0;
7493 #else
7494 key.lan.port = 0;
7495 key.ext.port = 0;
7496 #endif
7497 }
7498
7499 STATE_LOOKUP();
7500
7501 if (STATE_TRANSLATE((*state)->state_key)) {
7502 if (direction == PF_IN) {
7503 pf_change_icmp(pd2.src, NULL,
7504 daddr, &(*state)->state_key->lan.addr,
7505 0, NULL,
7506 pd2.ip_sum, icmpsum,
7507 pd->ip_sum, 0, pd2.af);
7508 } else {
7509 pf_change_icmp(pd2.dst, NULL,
7510 saddr, &(*state)->state_key->gwy.addr,
7511 0, NULL,
7512 pd2.ip_sum, icmpsum,
7513 pd->ip_sum, 0, pd2.af);
7514 }
7515 switch (pd2.af) {
7516 #if INET
7517 case AF_INET:
7518 #ifndef NO_APPLE_EXTENSIONS
7519 m = pf_lazy_makewritable(pd, m,
7520 ipoff2 + sizeof (h2));
7521 if (!m)
7522 return (PF_DROP);
7523 #endif
7524 m_copyback(m, off, ICMP_MINLEN,
7525 pd->hdr.icmp);
7526 m_copyback(m, ipoff2, sizeof (h2), &h2);
7527 break;
7528 #endif /* INET */
7529 #if INET6
7530 case AF_INET6:
7531 #ifndef NO_APPLE_EXTENSIONS
7532 m = pf_lazy_makewritable(pd, m,
7533 ipoff2 + sizeof (h2_6));
7534 if (!m)
7535 return (PF_DROP);
7536 #endif
7537 m_copyback(m, off,
7538 sizeof (struct icmp6_hdr),
7539 pd->hdr.icmp6);
7540 m_copyback(m, ipoff2, sizeof (h2_6),
7541 &h2_6);
7542 break;
7543 #endif /* INET6 */
7544 }
7545 }
7546
7547 return (PF_PASS);
7548 break;
7549 }
7550 }
7551 }
7552 }
7553
7554 #ifndef NO_APPLE_EXTENSIONS
7555 static int
7556 pf_test_state_grev1(struct pf_state **state, int direction,
7557 struct pfi_kif *kif, int off, struct pf_pdesc *pd)
7558 {
7559 struct pf_state_peer *src;
7560 struct pf_state_peer *dst;
7561 struct pf_state_key_cmp key;
7562 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
7563 struct mbuf *m;
7564
7565 #ifndef NO_APPLE_EXTENSIONS
7566 key.app_state = 0;
7567 #endif
7568 key.af = pd->af;
7569 key.proto = IPPROTO_GRE;
7570 key.proto_variant = PF_GRE_PPTP_VARIANT;
7571 if (direction == PF_IN) {
7572 PF_ACPY(&key.ext.addr, pd->src, key.af);
7573 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7574 key.gwy.xport.call_id = grev1->call_id;
7575 } else {
7576 PF_ACPY(&key.lan.addr, pd->src, key.af);
7577 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7578 key.ext.xport.call_id = grev1->call_id;
7579 }
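/*
 * Editorial note: GREv1 as used by PPTP (RFC 2637) carries a 16-bit
 * call ID in its enhanced header; that value stands in for a port
 * number in the state key built above.
 */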
7580
7581 STATE_LOOKUP();
7582
7583 if (direction == (*state)->state_key->direction) {
7584 src = &(*state)->src;
7585 dst = &(*state)->dst;
7586 } else {
7587 src = &(*state)->dst;
7588 dst = &(*state)->src;
7589 }
7590
7591 /* update states */
7592 if (src->state < PFGRE1S_INITIATING)
7593 src->state = PFGRE1S_INITIATING;
7594
7595 /* update expire time */
7596 (*state)->expire = pf_time_second();
7597 if (src->state >= PFGRE1S_INITIATING &&
7598 dst->state >= PFGRE1S_INITIATING) {
7599 (*state)->timeout = PFTM_GREv1_ESTABLISHED;
7600 src->state = PFGRE1S_ESTABLISHED;
7601 dst->state = PFGRE1S_ESTABLISHED;
7602 } else {
7603 (*state)->timeout = PFTM_GREv1_INITIATING;
7604 }
7605 /* translate source/destination address, if necessary */
7606 if (STATE_GRE_TRANSLATE((*state)->state_key)) {
7607 if (direction == PF_OUT) {
7608 switch (pd->af) {
7609 #if INET
7610 case AF_INET:
7611 pf_change_a(&pd->src->v4.s_addr,
7612 pd->ip_sum,
7613 (*state)->state_key->gwy.addr.v4.s_addr, 0);
7614 break;
7615 #endif /* INET */
7616 #if INET6
7617 case AF_INET6:
7618 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
7619 pd->af);
7620 break;
7621 #endif /* INET6 */
7622 }
7623 } else {
7624 grev1->call_id = (*state)->state_key->lan.xport.call_id;
7625
7626 switch (pd->af) {
7627 #if INET
7628 case AF_INET:
7629 pf_change_a(&pd->dst->v4.s_addr,
7630 pd->ip_sum,
7631 (*state)->state_key->lan.addr.v4.s_addr, 0);
7632 break;
7633 #endif /* INET */
7634 #if INET6
7635 case AF_INET6:
7636 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
7637 pd->af);
7638 break;
7639 #endif /* INET6 */
7640 }
7641 }
7642
7643 m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1));
7644 if (!m)
7645 return (PF_DROP);
7646 m_copyback(m, off, sizeof (*grev1), grev1);
7647 }
7648
7649 return (PF_PASS);
7650 }
7651
7652 int
7653 pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
7654 int off, struct pf_pdesc *pd)
7655 {
7656 #pragma unused(off)
7657 struct pf_state_peer *src;
7658 struct pf_state_peer *dst;
7659 struct pf_state_key_cmp key;
7660 struct pf_esp_hdr *esp = pd->hdr.esp;
7661 int action;
7662
7663 memset(&key, 0, sizeof (key));
7664 key.af = pd->af;
7665 key.proto = IPPROTO_ESP;
7666 if (direction == PF_IN) {
7667 PF_ACPY(&key.ext.addr, pd->src, key.af);
7668 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7669 key.gwy.xport.spi = esp->spi;
7670 } else {
7671 PF_ACPY(&key.lan.addr, pd->src, key.af);
7672 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7673 key.ext.xport.spi = esp->spi;
7674 }
7675
7676 *state = pf_find_state(kif, &key, direction);
7677
7678 if (*state == 0) {
7679 struct pf_state *s;
7680
7681 /*
7682 * <jhw@apple.com>
7683 * No matching state. Look for a blocking state. If we find
7684 * one, then use that state and move it so that it's keyed to
7685 * the SPI in the current packet.
7686 */
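/*
 * Editorial sketch of the recovery below: the wildcard ("blocking")
 * state was installed with an SPI of 0; when the first real ESP
 * packet arrives its state key is removed from the relevant tree,
 * stamped with the packet's SPI and re-inserted.  If the re-insert
 * collides with an existing key the state is detached and then freed,
 * and the packet is dropped.
 */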
7687 if (direction == PF_IN) {
7688 key.gwy.xport.spi = 0;
7689
7690 s = pf_find_state(kif, &key, direction);
7691 if (s) {
7692 struct pf_state_key *sk = s->state_key;
7693
7694 RB_REMOVE(pf_state_tree_ext_gwy,
7695 &pf_statetbl_ext_gwy, sk);
7696 sk->lan.xport.spi = sk->gwy.xport.spi =
7697 esp->spi;
7698
7699 if (RB_INSERT(pf_state_tree_ext_gwy,
7700 &pf_statetbl_ext_gwy, sk))
7701 pf_detach_state(s, PF_DT_SKIP_EXTGWY);
7702 else
7703 *state = s;
7704 }
7705 } else {
7706 key.ext.xport.spi = 0;
7707
7708 s = pf_find_state(kif, &key, direction);
7709 if (s) {
7710 struct pf_state_key *sk = s->state_key;
7711
7712 RB_REMOVE(pf_state_tree_lan_ext,
7713 &pf_statetbl_lan_ext, sk);
7714 sk->ext.xport.spi = esp->spi;
7715
7716 if (RB_INSERT(pf_state_tree_lan_ext,
7717 &pf_statetbl_lan_ext, sk))
7718 pf_detach_state(s, PF_DT_SKIP_LANEXT);
7719 else
7720 *state = s;
7721 }
7722 }
7723
7724 if (s) {
7725 if (*state == 0) {
7726 #if NPFSYNC
7727 if (s->creatorid == pf_status.hostid)
7728 pfsync_delete_state(s);
7729 #endif
7730 s->timeout = PFTM_UNLINKED;
7731 hook_runloop(&s->unlink_hooks,
7732 HOOK_REMOVE|HOOK_FREE);
7733 pf_src_tree_remove_state(s);
7734 pf_free_state(s);
7735 return (PF_DROP);
7736 }
7737 }
7738 }
7739
7740 if (pf_state_lookup_aux(state, kif, direction, &action))
7741 return (action);
7742
7743 if (direction == (*state)->state_key->direction) {
7744 src = &(*state)->src;
7745 dst = &(*state)->dst;
7746 } else {
7747 src = &(*state)->dst;
7748 dst = &(*state)->src;
7749 }
7750
7751 /* update states */
7752 if (src->state < PFESPS_INITIATING)
7753 src->state = PFESPS_INITIATING;
7754
7755 /* update expire time */
7756 (*state)->expire = pf_time_second();
7757 if (src->state >= PFESPS_INITIATING &&
7758 dst->state >= PFESPS_INITIATING) {
7759 (*state)->timeout = PFTM_ESP_ESTABLISHED;
7760 src->state = PFESPS_ESTABLISHED;
7761 dst->state = PFESPS_ESTABLISHED;
7762 } else {
7763 (*state)->timeout = PFTM_ESP_INITIATING;
7764 }
7765 /* translate source/destination address, if necessary */
7766 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
7767 if (direction == PF_OUT) {
7768 switch (pd->af) {
7769 #if INET
7770 case AF_INET:
7771 pf_change_a(&pd->src->v4.s_addr,
7772 pd->ip_sum,
7773 (*state)->state_key->gwy.addr.v4.s_addr, 0);
7774 break;
7775 #endif /* INET */
7776 #if INET6
7777 case AF_INET6:
7778 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
7779 pd->af);
7780 break;
7781 #endif /* INET6 */
7782 }
7783 } else {
7784 switch (pd->af) {
7785 #if INET
7786 case AF_INET:
7787 pf_change_a(&pd->dst->v4.s_addr,
7788 pd->ip_sum,
7789 (*state)->state_key->lan.addr.v4.s_addr, 0);
7790 break;
7791 #endif /* INET */
7792 #if INET6
7793 case AF_INET6:
7794 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
7795 pd->af);
7796 break;
7797 #endif /* INET6 */
7798 }
7799 }
7800 }
7801
7802 return (PF_PASS);
7803 }
7804 #endif
7805
7806 static int
7807 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
7808 struct pf_pdesc *pd)
7809 {
7810 struct pf_state_peer *src, *dst;
7811 struct pf_state_key_cmp key;
7812
7813 #ifndef NO_APPLE_EXTENSIONS
7814 key.app_state = 0;
7815 #endif
7816 key.af = pd->af;
7817 key.proto = pd->proto;
7818 if (direction == PF_IN) {
7819 PF_ACPY(&key.ext.addr, pd->src, key.af);
7820 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7821 #ifndef NO_APPLE_EXTENSIONS
7822 key.ext.xport.port = 0;
7823 key.gwy.xport.port = 0;
7824 #else
7825 key.ext.port = 0;
7826 key.gwy.port = 0;
7827 #endif
7828 } else {
7829 PF_ACPY(&key.lan.addr, pd->src, key.af);
7830 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7831 #ifndef NO_APPLE_EXTENSIONS
7832 key.lan.xport.port = 0;
7833 key.ext.xport.port = 0;
7834 #else
7835 key.lan.port = 0;
7836 key.ext.port = 0;
7837 #endif
7838 }
7839
7840 STATE_LOOKUP();
7841
7842 if (direction == (*state)->state_key->direction) {
7843 src = &(*state)->src;
7844 dst = &(*state)->dst;
7845 } else {
7846 src = &(*state)->dst;
7847 dst = &(*state)->src;
7848 }
7849
7850 /* update states */
7851 if (src->state < PFOTHERS_SINGLE)
7852 src->state = PFOTHERS_SINGLE;
7853 if (dst->state == PFOTHERS_SINGLE)
7854 dst->state = PFOTHERS_MULTIPLE;
7855
7856 /* update expire time */
7857 (*state)->expire = pf_time_second();
7858 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
7859 (*state)->timeout = PFTM_OTHER_MULTIPLE;
7860 else
7861 (*state)->timeout = PFTM_OTHER_SINGLE;
7862
7863 /* translate source/destination address, if necessary */
7864 #ifndef NO_APPLE_EXTENSIONS
7865 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
7866 #else
7867 if (STATE_TRANSLATE((*state)->state_key)) {
7868 #endif
7869 if (direction == PF_OUT) {
7870 switch (pd->af) {
7871 #if INET
7872 case AF_INET:
7873 pf_change_a(&pd->src->v4.s_addr,
7874 pd->ip_sum,
7875 (*state)->state_key->gwy.addr.v4.s_addr,
7876 0);
7877 break;
7878 #endif /* INET */
7879 #if INET6
7880 case AF_INET6:
7881 PF_ACPY(pd->src,
7882 &(*state)->state_key->gwy.addr, pd->af);
7883 break;
7884 #endif /* INET6 */
7885 }
7886 } else {
7887 switch (pd->af) {
7888 #if INET
7889 case AF_INET:
7890 pf_change_a(&pd->dst->v4.s_addr,
7891 pd->ip_sum,
7892 (*state)->state_key->lan.addr.v4.s_addr,
7893 0);
7894 break;
7895 #endif /* INET */
7896 #if INET6
7897 case AF_INET6:
7898 PF_ACPY(pd->dst,
7899 &(*state)->state_key->lan.addr, pd->af);
7900 break;
7901 #endif /* INET6 */
7902 }
7903 }
7904 }
7905
7906 return (PF_PASS);
7907 }
7908
7909 /*
7910 * ipoff and off are measured from the start of the mbuf chain.
7911 * h must be at "ipoff" on the mbuf chain.
7912 */
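/*
 * Editorial usage sketch, mirroring the call sites earlier in this
 * file: callers copy a bounded header out of the chain and bail out
 * when the pull fails, e.g.
 *
 *   struct tcphdr th;
 *   if (!pf_pull_hdr(m, off, &th, sizeof (th), NULL, reason, pd->af))
 *       return (PF_DROP);
 */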
7913 void *
7914 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
7915 u_short *actionp, u_short *reasonp, sa_family_t af)
7916 {
7917 switch (af) {
7918 #if INET
7919 case AF_INET: {
7920 struct ip *h = mtod(m, struct ip *);
7921 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
7922
7923 if (fragoff) {
7924 if (fragoff >= len) {
7925 ACTION_SET(actionp, PF_PASS);
7926 } else {
7927 ACTION_SET(actionp, PF_DROP);
7928 REASON_SET(reasonp, PFRES_FRAG);
7929 }
7930 return (NULL);
7931 }
7932 if (m->m_pkthdr.len < off + len ||
7933 ntohs(h->ip_len) < off + len) {
7934 ACTION_SET(actionp, PF_DROP);
7935 REASON_SET(reasonp, PFRES_SHORT);
7936 return (NULL);
7937 }
7938 break;
7939 }
7940 #endif /* INET */
7941 #if INET6
7942 case AF_INET6: {
7943 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
7944
7945 if (m->m_pkthdr.len < off + len ||
7946 (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
7947 (unsigned)(off + len)) {
7948 ACTION_SET(actionp, PF_DROP);
7949 REASON_SET(reasonp, PFRES_SHORT);
7950 return (NULL);
7951 }
7952 break;
7953 }
7954 #endif /* INET6 */
7955 }
7956 m_copydata(m, off, len, p);
7957 return (p);
7958 }
7959
7960 int
7961 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
7962 {
7963 #pragma unused(kif)
7964 struct sockaddr_in *dst;
7965 int ret = 1;
7966 #if INET6
7967 struct sockaddr_in6 *dst6;
7968 struct route_in6 ro;
7969 #else
7970 struct route ro;
7971 #endif
7972
7973 bzero(&ro, sizeof (ro));
7974 switch (af) {
7975 case AF_INET:
7976 dst = satosin(&ro.ro_dst);
7977 dst->sin_family = AF_INET;
7978 dst->sin_len = sizeof (*dst);
7979 dst->sin_addr = addr->v4;
7980 break;
7981 #if INET6
7982 case AF_INET6:
7983 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
7984 dst6->sin6_family = AF_INET6;
7985 dst6->sin6_len = sizeof (*dst6);
7986 dst6->sin6_addr = addr->v6;
7987 break;
7988 #endif /* INET6 */
7989 default:
7990 return (0);
7991 }
7992
7993 /* XXX: IFT_ENC is not currently used by anything */
7994 /* Skip checks for ipsec interfaces */
7995 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
7996 goto out;
7997
7998 rtalloc((struct route *)&ro);
7999
8000 out:
8001 if (ro.ro_rt != NULL)
8002 RTFREE(ro.ro_rt);
8003 return (ret);
8004 }
8005
8006 int
8007 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
8008 {
8009 #pragma unused(aw)
8010 struct sockaddr_in *dst;
8011 #if INET6
8012 struct sockaddr_in6 *dst6;
8013 struct route_in6 ro;
8014 #else
8015 struct route ro;
8016 #endif
8017 int ret = 0;
8018
8019 bzero(&ro, sizeof (ro));
8020 switch (af) {
8021 case AF_INET:
8022 dst = satosin(&ro.ro_dst);
8023 dst->sin_family = AF_INET;
8024 dst->sin_len = sizeof (*dst);
8025 dst->sin_addr = addr->v4;
8026 break;
8027 #if INET6
8028 case AF_INET6:
8029 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
8030 dst6->sin6_family = AF_INET6;
8031 dst6->sin6_len = sizeof (*dst6);
8032 dst6->sin6_addr = addr->v6;
8033 break;
8034 #endif /* INET6 */
8035 default:
8036 return (0);
8037 }
8038
8039 rtalloc((struct route *)&ro);
8040
8041 if (ro.ro_rt != NULL) {
8042 RTFREE(ro.ro_rt);
8043 }
8044
8045 return (ret);
8046 }
8047
8048 #if INET
8049 static void
8050 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
8051 struct pf_state *s, struct pf_pdesc *pd)
8052 {
8053 #pragma unused(pd)
8054 struct mbuf *m0, *m1;
8055 struct route iproute;
8056 struct route *ro = NULL;
8057 struct sockaddr_in *dst;
8058 struct ip *ip;
8059 struct ifnet *ifp = NULL;
8060 struct pf_addr naddr;
8061 struct pf_src_node *sn = NULL;
8062 int error = 0;
8063 int sw_csum = 0;
8064
8065 if (m == NULL || *m == NULL || r == NULL ||
8066 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
8067 panic("pf_route: invalid parameters");
8068
8069 if (pd->pf_mtag->routed++ > 3) {
8070 m0 = *m;
8071 *m = NULL;
8072 goto bad;
8073 }
8074
8075 if (r->rt == PF_DUPTO) {
8076 if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
8077 return;
8078 } else {
8079 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
8080 return;
8081 m0 = *m;
8082 }
8083
8084 if (m0->m_len < (int)sizeof (struct ip)) {
8085 DPFPRINTF(PF_DEBUG_URGENT,
8086 ("pf_route: m0->m_len < sizeof (struct ip)\n"));
8087 goto bad;
8088 }
8089
8090 ip = mtod(m0, struct ip *);
8091
8092 ro = &iproute;
8093 bzero((caddr_t)ro, sizeof (*ro));
8094 dst = satosin(&ro->ro_dst);
8095 dst->sin_family = AF_INET;
8096 dst->sin_len = sizeof (*dst);
8097 dst->sin_addr = ip->ip_dst;
8098
8099 if (r->rt == PF_FASTROUTE) {
8100 rtalloc(ro);
8101 if (ro->ro_rt == 0) {
8102 ipstat.ips_noroute++;
8103 goto bad;
8104 }
8105
8106 ifp = ro->ro_rt->rt_ifp;
8107 ro->ro_rt->rt_use++;
8108
8109 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
8110 dst = satosin(ro->ro_rt->rt_gateway);
8111 } else {
8112 if (TAILQ_EMPTY(&r->rpool.list)) {
8113 DPFPRINTF(PF_DEBUG_URGENT,
8114 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
8115 goto bad;
8116 }
8117 if (s == NULL) {
8118 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
8119 &naddr, NULL, &sn);
8120 if (!PF_AZERO(&naddr, AF_INET))
8121 dst->sin_addr.s_addr = naddr.v4.s_addr;
8122 ifp = r->rpool.cur->kif ?
8123 r->rpool.cur->kif->pfik_ifp : NULL;
8124 } else {
8125 if (!PF_AZERO(&s->rt_addr, AF_INET))
8126 dst->sin_addr.s_addr =
8127 s->rt_addr.v4.s_addr;
8128 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
8129 }
8130 }
8131 if (ifp == NULL)
8132 goto bad;
8133
8134 if (oifp != ifp) {
8135 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
8136 goto bad;
8137 else if (m0 == NULL)
8138 goto done;
8139 if (m0->m_len < (int)sizeof (struct ip)) {
8140 DPFPRINTF(PF_DEBUG_URGENT,
8141 ("pf_route: m0->m_len < sizeof (struct ip)\n"));
8142 goto bad;
8143 }
8144 ip = mtod(m0, struct ip *);
8145 }
8146
8147 /* Copied from ip_output. */
8148
8149 /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
8150 m0->m_pkthdr.csum_flags |= CSUM_IP;
8151 sw_csum = m0->m_pkthdr.csum_flags &
8152 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
8153
8154 if (ifp->if_hwassist & CSUM_TCP_SUM16) {
8155 /*
8156 * Special case code for GMACE
8157 * frames that can be checksummed by GMACE SUM16 HW:
8158 * frame >64, no fragments, no UDP
8159 */
8160 if (apple_hwcksum_tx && (m0->m_pkthdr.csum_flags & CSUM_TCP) &&
8161 (ntohs(ip->ip_len) > 50) &&
8162 (ntohs(ip->ip_len) <= ifp->if_mtu)) {
8163 /*
8164 * Apple GMAC HW, expects:
8165 * STUFF_OFFSET << 16 | START_OFFSET
8166 */
8167 /* IP+Enet header length */
8168 u_short offset = ((ip->ip_hl) << 2) + 14;
8169 u_short csumprev = m0->m_pkthdr.csum_data & 0xffff;
8170 m0->m_pkthdr.csum_flags = CSUM_DATA_VALID |
8171 CSUM_TCP_SUM16; /* for GMAC */
8172 m0->m_pkthdr.csum_data = (csumprev + offset) << 16;
8173 m0->m_pkthdr.csum_data += offset;
8174 /* do IP hdr chksum in software */
8175 sw_csum = CSUM_DELAY_IP;
8176 } else {
8177 /* let the software handle any UDP or TCP checksums */
8178 sw_csum |= (CSUM_DELAY_DATA & m0->m_pkthdr.csum_flags);
8179 }
8180 } else if (apple_hwcksum_tx == 0) {
8181 sw_csum |= (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
8182 m0->m_pkthdr.csum_flags;
8183 }
8184
8185 if (sw_csum & CSUM_DELAY_DATA) {
8186 in_delayed_cksum(m0);
8187 sw_csum &= ~CSUM_DELAY_DATA;
8188 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
8189 }
8190
8191 if (apple_hwcksum_tx != 0) {
8192 m0->m_pkthdr.csum_flags &=
8193 IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
8194 } else {
8195 m0->m_pkthdr.csum_flags = 0;
8196 }
8197
8198 if (ntohs(ip->ip_len) <= ifp->if_mtu ||
8199 (ifp->if_hwassist & CSUM_FRAGMENT)) {
8200 ip->ip_sum = 0;
8201 if (sw_csum & CSUM_DELAY_IP)
8202 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
8203 error = ifnet_output(ifp, PF_INET, m0, ro, sintosa(dst));
8204 goto done;
8205 }
8206
8207 /*
8208 * Too large for interface; fragment if possible.
8209 * Must be able to put at least 8 bytes per fragment.
8210 */
8211 if (ip->ip_off & htons(IP_DF)) {
8212 ipstat.ips_cantfrag++;
8213 if (r->rt != PF_DUPTO) {
8214 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
8215 ifp->if_mtu);
8216 goto done;
8217 } else
8218 goto bad;
8219 }
8220
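/*
 * Fragment to the interface MTU and transmit each fragment in turn;
 * once an error occurs, the remaining fragments are freed instead.
 */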
8221 m1 = m0;
8222 error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);
8223 if (error) {
8224 m0 = NULL;
8225 goto bad;
8226 }
8227
8228 for (m0 = m1; m0; m0 = m1) {
8229 m1 = m0->m_nextpkt;
8230 m0->m_nextpkt = 0;
8231 if (error == 0)
8232 error = ifnet_output(ifp, PF_INET, m0, ro,
8233 sintosa(dst));
8234 else
8235 m_freem(m0);
8236 }
8237
8238 if (error == 0)
8239 ipstat.ips_fragmented++;
8240
8241 done:
8242 if (r->rt != PF_DUPTO)
8243 *m = NULL;
8244 if (ro == &iproute && ro->ro_rt)
8245 RTFREE(ro->ro_rt);
8246 return;
8247
8248 bad:
8249 m_freem(m0);
8250 goto done;
8251 }
8252 #endif /* INET */
8253
8254 #if INET6
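/*
 * pf_route6() is the IPv6 counterpart of pf_route(): route-to/reply-to/
 * dup-to processing with the output interface taken from the rule's pool
 * or the state's cached route address.  PF_FASTROUTE is handled by simply
 * tagging the packet and handing it back to ip6_output(); oversized
 * packets trigger an ICMP6 "packet too big" error instead of fragmentation.
 */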
8255 static void
8256 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
8257 struct pf_state *s, struct pf_pdesc *pd)
8258 {
8259 #pragma unused(pd)
8260 struct mbuf *m0;
8261 struct route_in6 ip6route;
8262 struct route_in6 *ro;
8263 struct sockaddr_in6 *dst;
8264 struct ip6_hdr *ip6;
8265 struct ifnet *ifp = NULL;
8266 struct pf_addr naddr;
8267 struct pf_src_node *sn = NULL;
8268 int error = 0;
8269
8270 if (m == NULL || *m == NULL || r == NULL ||
8271 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
8272 panic("pf_route6: invalid parameters");
8273
8274 if (pd->pf_mtag->routed++ > 3) {
8275 m0 = *m;
8276 *m = NULL;
8277 goto bad;
8278 }
8279
8280 if (r->rt == PF_DUPTO) {
8281 if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
8282 return;
8283 } else {
8284 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
8285 return;
8286 m0 = *m;
8287 }
8288
8289 if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
8290 DPFPRINTF(PF_DEBUG_URGENT,
8291 ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
8292 goto bad;
8293 }
8294 ip6 = mtod(m0, struct ip6_hdr *);
8295
8296 ro = &ip6route;
8297 bzero((caddr_t)ro, sizeof (*ro));
8298 dst = (struct sockaddr_in6 *)&ro->ro_dst;
8299 dst->sin6_family = AF_INET6;
8300 dst->sin6_len = sizeof (*dst);
8301 dst->sin6_addr = ip6->ip6_dst;
8302
8303 /* Cheat. XXX why only in the v6 case??? */
8304 if (r->rt == PF_FASTROUTE) {
8305 struct pf_mtag *pf_mtag;
8306
8307 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
8308 goto bad;
8309 pf_mtag->flags |= PF_TAG_GENERATED;
8310 ip6_output(m0, NULL, NULL, 0, NULL, NULL, 0);
8311 return;
8312 }
8313
8314 if (TAILQ_EMPTY(&r->rpool.list)) {
8315 DPFPRINTF(PF_DEBUG_URGENT,
8316 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
8317 goto bad;
8318 }
8319 if (s == NULL) {
8320 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
8321 &naddr, NULL, &sn);
8322 if (!PF_AZERO(&naddr, AF_INET6))
8323 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
8324 &naddr, AF_INET6);
8325 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
8326 } else {
8327 if (!PF_AZERO(&s->rt_addr, AF_INET6))
8328 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
8329 &s->rt_addr, AF_INET6);
8330 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
8331 }
8332 if (ifp == NULL)
8333 goto bad;
8334
8335 if (oifp != ifp) {
8336 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
8337 goto bad;
8338 else if (m0 == NULL)
8339 goto done;
8340 if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
8341 DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
8342 "< sizeof (struct ip6_hdr)\n"));
8343 goto bad;
8344 }
8345 ip6 = mtod(m0, struct ip6_hdr *);
8346 }
8347
8348 /*
8349 * If the packet is too large for the outgoing interface,
8350 * send back an icmp6 error.
8351 */
8352 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
8353 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
8354 if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
8355 error = nd6_output(ifp, ifp, m0, dst, NULL, 0);
8356 } else {
8357 in6_ifstat_inc(ifp, ifs6_in_toobig);
8358 if (r->rt != PF_DUPTO)
8359 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
8360 else
8361 goto bad;
8362 }
8363
8364 done:
8365 if (r->rt != PF_DUPTO)
8366 *m = NULL;
8367 return;
8368
8369 bad:
8370 m_freem(m0);
8371 goto done;
8372 }
8373 #endif /* INET6 */
8374
8375
8376 /*
8377 * Check the protocol (tcp/udp/icmp/icmp6) checksum.
8378 * off is the offset where the protocol header starts,
8379 * len is the total length of the protocol header plus payload.
8380 * Returns 0 when the checksum is valid, otherwise returns 1.
8381 */
8382 static int
8383 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
8384 sa_family_t af)
8385 {
8386 u_int16_t sum;
8387
8388 switch (p) {
8389 case IPPROTO_TCP:
8390 case IPPROTO_UDP:
8391 /*
8392 * Optimize for the common case; if the hardware calculated
8393 * value doesn't include pseudo-header checksum, or if it
8394 * is partially-computed (only 16-bit summation), do it in
8395 * software below.
8396 */
8397 if (apple_hwcksum_rx && (m->m_pkthdr.csum_flags &
8398 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
8399 (m->m_pkthdr.csum_data ^ 0xffff) == 0) {
8400 return (0);
8401 }
8402 break;
8403 case IPPROTO_ICMP:
8404 #if INET6
8405 case IPPROTO_ICMPV6:
8406 #endif /* INET6 */
8407 break;
8408 default:
8409 return (1);
8410 }
8411 if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))
8412 return (1);
8413 if (m->m_pkthdr.len < off + len)
8414 return (1);
8415 switch (af) {
8416 #if INET
8417 case AF_INET:
8418 if (p == IPPROTO_ICMP) {
8419 if (m->m_len < off)
8420 return (1);
8421 m->m_data += off;
8422 m->m_len -= off;
8423 sum = in_cksum(m, len);
8424 m->m_data -= off;
8425 m->m_len += off;
8426 } else {
8427 if (m->m_len < (int)sizeof (struct ip))
8428 return (1);
8429 sum = inet_cksum(m, p, off, len);
8430 }
8431 break;
8432 #endif /* INET */
8433 #if INET6
8434 case AF_INET6:
8435 if (m->m_len < (int)sizeof (struct ip6_hdr))
8436 return (1);
8437 sum = inet6_cksum(m, p, off, len);
8438 break;
8439 #endif /* INET6 */
8440 default:
8441 return (1);
8442 }
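/* a non-zero residual means the checksum is bad; count it per protocol */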
8443 if (sum) {
8444 switch (p) {
8445 case IPPROTO_TCP:
8446 tcpstat.tcps_rcvbadsum++;
8447 break;
8448 case IPPROTO_UDP:
8449 udpstat.udps_badsum++;
8450 break;
8451 case IPPROTO_ICMP:
8452 icmpstat.icps_checksum++;
8453 break;
8454 #if INET6
8455 case IPPROTO_ICMPV6:
8456 icmp6stat.icp6s_checksum++;
8457 break;
8458 #endif /* INET6 */
8459 }
8460 return (1);
8461 }
8462 return (0);
8463 }
8464
8465 #if INET
8466 #ifndef NO_APPLE_EXTENSIONS
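/*
 * The Apple extensions' state and normalization handlers may substitute a
 * different working mbuf into pd.mp; this macro resynchronizes the local
 * mbuf and IPv4 header pointers with pd.mp after such a call.
 */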
8467 #define PF_APPLE_UPDATE_PDESC_IPv4() \
8468 do { \
8469 if (m && pd.mp && m != pd.mp) { \
8470 m = pd.mp; \
8471 h = mtod(m, struct ip *); \
8472 } \
8473 } while (0)
8474 #endif
8475
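/*
 * pf_test() is the main IPv4 inspection entry point.  It is called with
 * the pf lock held for every inbound and outbound packet: it normalizes
 * and reassembles the packet, dispatches on the transport protocol to the
 * state-tracking and rule-matching code, and finally applies IP-option
 * policy, tagging, ALTQ classification, logging, statistics and, if the
 * matching rule asks for it, policy routing via pf_route().
 */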
8476 int
8477 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
8478 struct ether_header *eh)
8479 {
8480 struct pfi_kif *kif;
8481 u_short action, reason = 0, log = 0;
8482 struct mbuf *m = *m0;
8483 struct ip *h = 0;
8484 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
8485 struct pf_state *s = NULL;
8486 struct pf_state_key *sk = NULL;
8487 struct pf_ruleset *ruleset = NULL;
8488 struct pf_pdesc pd;
8489 int off, dirndx, pqid = 0;
8490
8491 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
8492
8493 if (!pf_status.running)
8494 return (PF_PASS);
8495
8496 memset(&pd, 0, sizeof (pd));
8497
8498 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
8499 DPFPRINTF(PF_DEBUG_URGENT,
8500 ("pf_test: pf_get_mtag returned NULL\n"));
8501 return (PF_DROP);
8502 }
8503
8504 if (pd.pf_mtag->flags & PF_TAG_GENERATED)
8505 return (PF_PASS);
8506
8507 kif = (struct pfi_kif *)ifp->if_pf_kif;
8508
8509 if (kif == NULL) {
8510 DPFPRINTF(PF_DEBUG_URGENT,
8511 ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
8512 return (PF_DROP);
8513 }
8514 if (kif->pfik_flags & PFI_IFLAG_SKIP)
8515 return (PF_PASS);
8516
8517 #ifdef DIAGNOSTIC
8518 if ((m->m_flags & M_PKTHDR) == 0)
8519 panic("non-M_PKTHDR is passed to pf_test");
8520 #endif /* DIAGNOSTIC */
8521
8522 if (m->m_pkthdr.len < (int)sizeof (*h)) {
8523 action = PF_DROP;
8524 REASON_SET(&reason, PFRES_SHORT);
8525 log = 1;
8526 goto done;
8527 }
8528
8529 /* We do IP header normalization and packet reassembly here */
8530 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
8531 action = PF_DROP;
8532 goto done;
8533 }
8534 m = *m0; /* pf_normalize messes with m0 */
8535 h = mtod(m, struct ip *);
8536
8537 off = h->ip_hl << 2;
8538 if (off < (int)sizeof (*h)) {
8539 action = PF_DROP;
8540 REASON_SET(&reason, PFRES_SHORT);
8541 log = 1;
8542 goto done;
8543 }
8544
8545 pd.src = (struct pf_addr *)&h->ip_src;
8546 pd.dst = (struct pf_addr *)&h->ip_dst;
8547 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
8548 pd.ip_sum = &h->ip_sum;
8549 pd.proto = h->ip_p;
8550 #ifndef NO_APPLE_EXTENSIONS
8551 pd.proto_variant = 0;
8552 pd.mp = m;
8553 pd.lmw = 0;
8554 #endif
8555 pd.af = AF_INET;
8556 pd.tos = h->ip_tos;
8557 pd.tot_len = ntohs(h->ip_len);
8558 pd.eh = eh;
8559
8560 /* handle fragments that didn't get reassembled by normalization */
8561 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
8562 action = pf_test_fragment(&r, dir, kif, m, h,
8563 &pd, &a, &ruleset);
8564 goto done;
8565 }
8566
8567 switch (h->ip_p) {
8568
8569 case IPPROTO_TCP: {
8570 struct tcphdr th;
8571 pd.hdr.tcp = &th;
8572 if (!pf_pull_hdr(m, off, &th, sizeof (th),
8573 &action, &reason, AF_INET)) {
8574 log = action != PF_PASS;
8575 goto done;
8576 }
8577 pd.p_len = pd.tot_len - off - (th.th_off << 2);
8578 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
8579 pqid = 1;
8580 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
8581 #ifndef NO_APPLE_EXTENSIONS
8582 if (pd.lmw < 0)
8583 goto done;
8584 PF_APPLE_UPDATE_PDESC_IPv4();
8585 #endif
8586 if (action == PF_DROP)
8587 goto done;
8588 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
8589 &reason);
8590 #ifndef NO_APPLE_EXTENSIONS
8591 if (pd.lmw < 0)
8592 goto done;
8593 PF_APPLE_UPDATE_PDESC_IPv4();
8594 #endif
8595 if (action == PF_PASS) {
8596 #if NPFSYNC
8597 pfsync_update_state(s);
8598 #endif /* NPFSYNC */
8599 r = s->rule.ptr;
8600 a = s->anchor.ptr;
8601 log = s->log;
8602 } else if (s == NULL)
8603 action = pf_test_rule(&r, &s, dir, kif,
8604 m, off, h, &pd, &a, &ruleset, &ipintrq);
8605 break;
8606 }
8607
8608 case IPPROTO_UDP: {
8609 struct udphdr uh;
8610
8611 pd.hdr.udp = &uh;
8612 if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
8613 &action, &reason, AF_INET)) {
8614 log = action != PF_PASS;
8615 goto done;
8616 }
8617 if (uh.uh_dport == 0 ||
8618 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
8619 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
8620 action = PF_DROP;
8621 REASON_SET(&reason, PFRES_SHORT);
8622 goto done;
8623 }
8624 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
8625 &reason);
8626 #ifndef NO_APPLE_EXTENSIONS
8627 if (pd.lmw < 0)
8628 goto done;
8629 PF_APPLE_UPDATE_PDESC_IPv4();
8630 #endif
8631 if (action == PF_PASS) {
8632 #if NPFSYNC
8633 pfsync_update_state(s);
8634 #endif /* NPFSYNC */
8635 r = s->rule.ptr;
8636 a = s->anchor.ptr;
8637 log = s->log;
8638 } else if (s == NULL)
8639 action = pf_test_rule(&r, &s, dir, kif,
8640 m, off, h, &pd, &a, &ruleset, &ipintrq);
8641 break;
8642 }
8643
8644 case IPPROTO_ICMP: {
8645 struct icmp ih;
8646
8647 pd.hdr.icmp = &ih;
8648 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
8649 &action, &reason, AF_INET)) {
8650 log = action != PF_PASS;
8651 goto done;
8652 }
8653 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
8654 &reason);
8655 #ifndef NO_APPLE_EXTENSIONS
8656 if (pd.lmw < 0)
8657 goto done;
8658 PF_APPLE_UPDATE_PDESC_IPv4();
8659 #endif
8660 if (action == PF_PASS) {
8661 #if NPFSYNC
8662 pfsync_update_state(s);
8663 #endif /* NPFSYNC */
8664 r = s->rule.ptr;
8665 a = s->anchor.ptr;
8666 log = s->log;
8667 } else if (s == NULL)
8668 action = pf_test_rule(&r, &s, dir, kif,
8669 m, off, h, &pd, &a, &ruleset, &ipintrq);
8670 break;
8671 }
8672
8673 #ifndef NO_APPLE_EXTENSIONS
8674 case IPPROTO_ESP: {
8675 struct pf_esp_hdr esp;
8676
8677 pd.hdr.esp = &esp;
8678 if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
8679 AF_INET)) {
8680 log = action != PF_PASS;
8681 goto done;
8682 }
8683 action = pf_test_state_esp(&s, dir, kif, off, &pd);
8684 if (pd.lmw < 0)
8685 goto done;
8686 PF_APPLE_UPDATE_PDESC_IPv4();
8687 if (action == PF_PASS) {
8688 #if NPFSYNC
8689 pfsync_update_state(s);
8690 #endif /* NPFSYNC */
8691 r = s->rule.ptr;
8692 a = s->anchor.ptr;
8693 log = s->log;
8694 } else if (s == NULL)
8695 action = pf_test_rule(&r, &s, dir, kif,
8696 m, off, h, &pd, &a, &ruleset, &ipintrq);
8697 break;
8698 }
8699
8700 case IPPROTO_GRE: {
8701 struct pf_grev1_hdr grev1;
8702 pd.hdr.grev1 = &grev1;
8703 if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
8704 &reason, AF_INET)) {
8705 log = (action != PF_PASS);
8706 goto done;
8707 }
8708 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
8709 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
8710 if (ntohs(grev1.payload_length) >
8711 m->m_pkthdr.len - off) {
8712 action = PF_DROP;
8713 REASON_SET(&reason, PFRES_SHORT);
8714 goto done;
8715 }
8716 pd.proto_variant = PF_GRE_PPTP_VARIANT;
8717 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
8718 if (pd.lmw < 0) goto done;
8719 PF_APPLE_UPDATE_PDESC_IPv4();
8720 if (action == PF_PASS) {
8721 #if NPFSYNC
8722 pfsync_update_state(s);
8723 #endif /* NPFSYNC */
8724 r = s->rule.ptr;
8725 a = s->anchor.ptr;
8726 log = s->log;
8727 break;
8728 } else if (s == NULL) {
8729 action = pf_test_rule(&r, &s, dir, kif, m, off,
8730 h, &pd, &a, &ruleset, &ipintrq);
8731 if (action == PF_PASS)
8732 break;
8733 }
8734 }
8735
8736 /* not GREv1/PPTP, so treat as ordinary GRE... */
8737 }
8738 #endif
8739
8740 default:
8741 action = pf_test_state_other(&s, dir, kif, &pd);
8742 #ifndef NO_APPLE_EXTENSIONS
8743 if (pd.lmw < 0)
8744 goto done;
8745 PF_APPLE_UPDATE_PDESC_IPv4();
8746 #endif
8747 if (action == PF_PASS) {
8748 #if NPFSYNC
8749 pfsync_update_state(s);
8750 #endif /* NPFSYNC */
8751 r = s->rule.ptr;
8752 a = s->anchor.ptr;
8753 log = s->log;
8754 } else if (s == NULL)
8755 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
8756 &pd, &a, &ruleset, &ipintrq);
8757 break;
8758 }
8759
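/*
 * Post-verdict processing: IP-option policy, packet tagging, ALTQ queue
 * assignment, loopback-redirect marking, logging and the per-rule,
 * per-state and per-table counters are all handled below.
 */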
8760 done:
8761 #ifndef NO_APPLE_EXTENSIONS
8762 *m0 = pd.mp;
8763 PF_APPLE_UPDATE_PDESC_IPv4();
8764 #endif
8765
8766 if (action == PF_PASS && h->ip_hl > 5 &&
8767 !((s && s->allow_opts) || r->allow_opts)) {
8768 action = PF_DROP;
8769 REASON_SET(&reason, PFRES_IPOPTIONS);
8770 log = 1;
8771 DPFPRINTF(PF_DEBUG_MISC,
8772 ("pf: dropping packet with ip options [hlen=%u]\n",
8773 (unsigned int) h->ip_hl));
8774 }
8775
8776 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
8777 (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
8778 r->rtableid);
8779
8780 #if ALTQ
8781 if (action == PF_PASS && r->qid) {
8782 if (pqid || (pd.tos & IPTOS_LOWDELAY))
8783 pd.pf_mtag->qid = r->pqid;
8784 else
8785 pd.pf_mtag->qid = r->qid;
8786 /* add hints for ecn */
8787 pd.pf_mtag->hdr = h;
8788 }
8789 #endif /* ALTQ */
8790
8791 /*
8792 * connections redirected to loopback should not match sockets
8793 * bound specifically to loopback due to security implications,
8794 * see tcp_input() and in_pcblookup_listen().
8795 */
8796 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
8797 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
8798 (s->nat_rule.ptr->action == PF_RDR ||
8799 s->nat_rule.ptr->action == PF_BINAT) &&
8800 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
8801 pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;
8802
8803 if (log) {
8804 struct pf_rule *lr;
8805
8806 if (s != NULL && s->nat_rule.ptr != NULL &&
8807 s->nat_rule.ptr->log & PF_LOG_ALL)
8808 lr = s->nat_rule.ptr;
8809 else
8810 lr = r;
8811 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
8812 &pd);
8813 }
8814
8815 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
8816 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
8817
8818 if (action == PF_PASS || r->action == PF_DROP) {
8819 dirndx = (dir == PF_OUT);
8820 r->packets[dirndx]++;
8821 r->bytes[dirndx] += pd.tot_len;
8822 if (a != NULL) {
8823 a->packets[dirndx]++;
8824 a->bytes[dirndx] += pd.tot_len;
8825 }
8826 if (s != NULL) {
8827 sk = s->state_key;
8828 if (s->nat_rule.ptr != NULL) {
8829 s->nat_rule.ptr->packets[dirndx]++;
8830 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
8831 }
8832 if (s->src_node != NULL) {
8833 s->src_node->packets[dirndx]++;
8834 s->src_node->bytes[dirndx] += pd.tot_len;
8835 }
8836 if (s->nat_src_node != NULL) {
8837 s->nat_src_node->packets[dirndx]++;
8838 s->nat_src_node->bytes[dirndx] += pd.tot_len;
8839 }
8840 dirndx = (dir == sk->direction) ? 0 : 1;
8841 s->packets[dirndx]++;
8842 s->bytes[dirndx] += pd.tot_len;
8843 }
8844 tr = r;
8845 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
8846 if (nr != NULL) {
8847 struct pf_addr *x;
8848 /*
8849 * XXX: we need to make sure that the addresses
8850 * passed to pfr_update_stats() are the same as
8851 * the addresses used during matching (pfr_match)
8852 */
8853 if (r == &pf_default_rule) {
8854 tr = nr;
8855 x = (sk == NULL || sk->direction == dir) ?
8856 &pd.baddr : &pd.naddr;
8857 } else
8858 x = (sk == NULL || sk->direction == dir) ?
8859 &pd.naddr : &pd.baddr;
8860 if (x == &pd.baddr || s == NULL) {
8861 /* we need to change the address */
8862 if (dir == PF_OUT)
8863 pd.src = x;
8864 else
8865 pd.dst = x;
8866 }
8867 }
8868 if (tr->src.addr.type == PF_ADDR_TABLE)
8869 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
8870 sk->direction == dir) ?
8871 pd.src : pd.dst, pd.af,
8872 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
8873 tr->src.neg);
8874 if (tr->dst.addr.type == PF_ADDR_TABLE)
8875 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
8876 sk->direction == dir) ? pd.dst : pd.src, pd.af,
8877 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
8878 tr->dst.neg);
8879 }
8880
8881 #ifndef NO_APPLE_EXTENSIONS
8882 VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
8883
8884 if (*m0) {
8885 if (pd.lmw < 0) {
8886 REASON_SET(&reason, PFRES_MEMORY);
8887 action = PF_DROP;
8888 }
8889
8890 if (action == PF_DROP) {
8891 m_freem(*m0);
8892 *m0 = NULL;
8893 return (PF_DROP);
8894 }
8895
8896 *m0 = m;
8897 }
8898 #endif
8899
8900 if (action == PF_SYNPROXY_DROP) {
8901 m_freem(*m0);
8902 *m0 = NULL;
8903 action = PF_PASS;
8904 } else if (r->rt)
8905 /* pf_route can free the mbuf causing *m0 to become NULL */
8906 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
8907
8908 return (action);
8909 }
8910 #endif /* INET */
8911
8912 #if INET6
8913 #ifndef NO_APPLE_EXTENSIONS
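/*
 * IPv6 counterpart of PF_APPLE_UPDATE_PDESC_IPv4(); it additionally keeps
 * the checksum mbuf pointer (n) in sync when it aliases m.
 */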
8914 #define PF_APPLE_UPDATE_PDESC_IPv6() \
8915 do { \
8916 if (m && pd.mp && m != pd.mp) { \
8917 if (n == m) \
8918 n = pd.mp; \
8919 m = pd.mp; \
8920 h = mtod(m, struct ip6_hdr *); \
8921 } \
8922 } while (0)
8923 #endif
8924
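/*
 * pf_test6() is the IPv6 counterpart of pf_test().  In addition to the
 * IPv4 flow, it walks the extension-header chain to find the upper-layer
 * protocol, rejecting jumbograms, multiple routing headers and type 0
 * routing headers along the way.
 */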
8925 int
8926 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
8927 struct ether_header *eh)
8928 {
8929 struct pfi_kif *kif;
8930 u_short action, reason = 0, log = 0;
8931 struct mbuf *m = *m0, *n = NULL;
8932 struct ip6_hdr *h;
8933 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
8934 struct pf_state *s = NULL;
8935 struct pf_state_key *sk = NULL;
8936 struct pf_ruleset *ruleset = NULL;
8937 struct pf_pdesc pd;
8938 int off, terminal = 0, dirndx, rh_cnt = 0;
8939
8940 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
8941
8942 if (!pf_status.running)
8943 return (PF_PASS);
8944
8945 memset(&pd, 0, sizeof (pd));
8946
8947 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
8948 DPFPRINTF(PF_DEBUG_URGENT,
8949 ("pf_test6: pf_get_mtag returned NULL\n"));
8950 return (PF_DROP);
8951 }
8952
8953 if (pd.pf_mtag->flags & PF_TAG_GENERATED)
8954 return (PF_PASS);
8955
8956 kif = (struct pfi_kif *)ifp->if_pf_kif;
8957
8958 if (kif == NULL) {
8959 DPFPRINTF(PF_DEBUG_URGENT,
8960 ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));
8961 return (PF_DROP);
8962 }
8963 if (kif->pfik_flags & PFI_IFLAG_SKIP)
8964 return (PF_PASS);
8965
8966 #ifdef DIAGNOSTIC
8967 if ((m->m_flags & M_PKTHDR) == 0)
8968 panic("non-M_PKTHDR is passed to pf_test6");
8969 #endif /* DIAGNOSTIC */
8970
8971 h = mtod(m, struct ip6_hdr *);
8972
8973 if (m->m_pkthdr.len < (int)sizeof (*h)) {
8974 action = PF_DROP;
8975 REASON_SET(&reason, PFRES_SHORT);
8976 log = 1;
8977 goto done;
8978 }
8979
8980 /* We do IP header normalization and packet reassembly here */
8981 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
8982 action = PF_DROP;
8983 goto done;
8984 }
8985 m = *m0; /* pf_normalize messes with m0 */
8986 h = mtod(m, struct ip6_hdr *);
8987
8988 #if 1
8989 /*
8990 * We do not support jumbograms yet. If we keep going, a zero ip6_plen
8991 * will do something bad, so drop the packet for now.
8992 */
8993 if (htons(h->ip6_plen) == 0) {
8994 action = PF_DROP;
8995 REASON_SET(&reason, PFRES_NORM); /*XXX*/
8996 goto done;
8997 }
8998 #endif
8999
9000 pd.src = (struct pf_addr *)&h->ip6_src;
9001 pd.dst = (struct pf_addr *)&h->ip6_dst;
9002 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
9003 pd.ip_sum = NULL;
9004 pd.af = AF_INET6;
9005 pd.tos = 0;
9006 pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);
9007 pd.eh = eh;
9008
9009 off = ((caddr_t)h - m->m_data) + sizeof (struct ip6_hdr);
9010 pd.proto = h->ip6_nxt;
9011 #ifndef NO_APPLE_EXTENSIONS
9012 pd.proto_variant = 0;
9013 pd.mp = m;
9014 pd.lmw = 0;
9015 #endif
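/*
 * Walk the chain of extension headers until a terminal (upper-layer)
 * header is reached, updating off and pd.proto as we go.
 */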
9016 do {
9017 switch (pd.proto) {
9018 case IPPROTO_FRAGMENT:
9019 action = pf_test_fragment(&r, dir, kif, m, h,
9020 &pd, &a, &ruleset);
9021 if (action == PF_DROP)
9022 REASON_SET(&reason, PFRES_FRAG);
9023 goto done;
9024 case IPPROTO_ROUTING: {
9025 struct ip6_rthdr rthdr;
9026
9027 if (rh_cnt++) {
9028 DPFPRINTF(PF_DEBUG_MISC,
9029 ("pf: IPv6 more than one rthdr\n"));
9030 action = PF_DROP;
9031 REASON_SET(&reason, PFRES_IPOPTIONS);
9032 log = 1;
9033 goto done;
9034 }
9035 if (!pf_pull_hdr(m, off, &rthdr, sizeof (rthdr), NULL,
9036 &reason, pd.af)) {
9037 DPFPRINTF(PF_DEBUG_MISC,
9038 ("pf: IPv6 short rthdr\n"));
9039 action = PF_DROP;
9040 REASON_SET(&reason, PFRES_SHORT);
9041 log = 1;
9042 goto done;
9043 }
9044 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
9045 DPFPRINTF(PF_DEBUG_MISC,
9046 ("pf: IPv6 rthdr0\n"));
9047 action = PF_DROP;
9048 REASON_SET(&reason, PFRES_IPOPTIONS);
9049 log = 1;
9050 goto done;
9051 }
9052 /* FALLTHROUGH */
9053 }
9054 case IPPROTO_AH:
9055 case IPPROTO_HOPOPTS:
9056 case IPPROTO_DSTOPTS: {
9057 /* get next header and header length */
9058 struct ip6_ext opt6;
9059
9060 if (!pf_pull_hdr(m, off, &opt6, sizeof (opt6),
9061 NULL, &reason, pd.af)) {
9062 DPFPRINTF(PF_DEBUG_MISC,
9063 ("pf: IPv6 short opt\n"));
9064 action = PF_DROP;
9065 log = 1;
9066 goto done;
9067 }
9068 if (pd.proto == IPPROTO_AH)
9069 off += (opt6.ip6e_len + 2) * 4;
9070 else
9071 off += (opt6.ip6e_len + 1) * 8;
9072 pd.proto = opt6.ip6e_nxt;
9073 /* go to the next header */
9074 break;
9075 }
9076 default:
9077 terminal++;
9078 break;
9079 }
9080 } while (!terminal);
9081
9082 /* if there's no routing header, use unmodified mbuf for checksumming */
9083 if (!n)
9084 n = m;
9085
9086 switch (pd.proto) {
9087
9088 case IPPROTO_TCP: {
9089 struct tcphdr th;
9090
9091 pd.hdr.tcp = &th;
9092 if (!pf_pull_hdr(m, off, &th, sizeof (th),
9093 &action, &reason, AF_INET6)) {
9094 log = action != PF_PASS;
9095 goto done;
9096 }
9097 pd.p_len = pd.tot_len - off - (th.th_off << 2);
9098 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
9099 #ifndef NO_APPLE_EXTENSIONS
9100 if (pd.lmw < 0)
9101 goto done;
9102 PF_APPLE_UPDATE_PDESC_IPv6();
9103 #endif
9104 if (action == PF_DROP)
9105 goto done;
9106 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
9107 &reason);
9108 #ifndef NO_APPLE_EXTENSIONS
9109 if (pd.lmw < 0)
9110 goto done;
9111 PF_APPLE_UPDATE_PDESC_IPv6();
9112 #endif
9113 if (action == PF_PASS) {
9114 #if NPFSYNC
9115 pfsync_update_state(s);
9116 #endif /* NPFSYNC */
9117 r = s->rule.ptr;
9118 a = s->anchor.ptr;
9119 log = s->log;
9120 } else if (s == NULL)
9121 action = pf_test_rule(&r, &s, dir, kif,
9122 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9123 break;
9124 }
9125
9126 case IPPROTO_UDP: {
9127 struct udphdr uh;
9128
9129 pd.hdr.udp = &uh;
9130 if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
9131 &action, &reason, AF_INET6)) {
9132 log = action != PF_PASS;
9133 goto done;
9134 }
9135 if (uh.uh_dport == 0 ||
9136 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
9137 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
9138 action = PF_DROP;
9139 REASON_SET(&reason, PFRES_SHORT);
9140 goto done;
9141 }
9142 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
9143 &reason);
9144 #ifndef NO_APPLE_EXTENSIONS
9145 if (pd.lmw < 0)
9146 goto done;
9147 PF_APPLE_UPDATE_PDESC_IPv6();
9148 #endif
9149 if (action == PF_PASS) {
9150 #if NPFSYNC
9151 pfsync_update_state(s);
9152 #endif /* NPFSYNC */
9153 r = s->rule.ptr;
9154 a = s->anchor.ptr;
9155 log = s->log;
9156 } else if (s == NULL)
9157 action = pf_test_rule(&r, &s, dir, kif,
9158 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9159 break;
9160 }
9161
9162 case IPPROTO_ICMPV6: {
9163 struct icmp6_hdr ih;
9164
9165 pd.hdr.icmp6 = &ih;
9166 if (!pf_pull_hdr(m, off, &ih, sizeof (ih),
9167 &action, &reason, AF_INET6)) {
9168 log = action != PF_PASS;
9169 goto done;
9170 }
9171 action = pf_test_state_icmp(&s, dir, kif,
9172 m, off, h, &pd, &reason);
9173 #ifndef NO_APPLE_EXTENSIONS
9174 if (pd.lmw < 0)
9175 goto done;
9176 PF_APPLE_UPDATE_PDESC_IPv6();
9177 #endif
9178 if (action == PF_PASS) {
9179 #if NPFSYNC
9180 pfsync_update_state(s);
9181 #endif /* NPFSYNC */
9182 r = s->rule.ptr;
9183 a = s->anchor.ptr;
9184 log = s->log;
9185 } else if (s == NULL)
9186 action = pf_test_rule(&r, &s, dir, kif,
9187 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9188 break;
9189 }
9190
9191 #ifndef NO_APPLE_EXTENSIONS
9192 case IPPROTO_ESP: {
9193 struct pf_esp_hdr esp;
9194
9195 pd.hdr.esp = &esp;
9196 if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
9197 AF_INET6)) {
9198 log = action != PF_PASS;
9199 goto done;
9200 }
9201 action = pf_test_state_esp(&s, dir, kif, off, &pd);
9202 if (pd.lmw < 0)
9203 goto done;
9204 PF_APPLE_UPDATE_PDESC_IPv6();
9205 if (action == PF_PASS) {
9206 #if NPFSYNC
9207 pfsync_update_state(s);
9208 #endif /* NPFSYNC */
9209 r = s->rule.ptr;
9210 a = s->anchor.ptr;
9211 log = s->log;
9212 } else if (s == NULL)
9213 action = pf_test_rule(&r, &s, dir, kif,
9214 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9215 break;
9216 }
9217
9218 case IPPROTO_GRE: {
9219 struct pf_grev1_hdr grev1;
9220
9221 pd.hdr.grev1 = &grev1;
9222 if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
9223 &reason, AF_INET6)) {
9224 log = (action != PF_PASS);
9225 goto done;
9226 }
9227 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
9228 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
9229 if (ntohs(grev1.payload_length) >
9230 m->m_pkthdr.len - off) {
9231 action = PF_DROP;
9232 REASON_SET(&reason, PFRES_SHORT);
9233 goto done;
9234 }
9235 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
9236 if (pd.lmw < 0)
9237 goto done;
9238 PF_APPLE_UPDATE_PDESC_IPv6();
9239 if (action == PF_PASS) {
9240 #if NPFSYNC
9241 pfsync_update_state(s);
9242 #endif /* NPFSYNC */
9243 r = s->rule.ptr;
9244 a = s->anchor.ptr;
9245 log = s->log;
9246 break;
9247 } else if (s == NULL) {
9248 action = pf_test_rule(&r, &s, dir, kif, m, off,
9249 h, &pd, &a, &ruleset, &ip6intrq);
9250 if (action == PF_PASS)
9251 break;
9252 }
9253 }
9254
9255 /* not GREv1/PPTP, so treat as ordinary GRE... */
9256 }
9257 #endif
9258
9259 default:
9260 action = pf_test_state_other(&s, dir, kif, &pd);
9261 #ifndef NO_APPLE_EXTENSIONS
9262 if (pd.lmw < 0)
9263 goto done;
9264 PF_APPLE_UPDATE_PDESC_IPv6();
9265 #endif
9266 if (action == PF_PASS) {
9267 #if NPFSYNC
9268 pfsync_update_state(s);
9269 #endif /* NPFSYNC */
9270 r = s->rule.ptr;
9271 a = s->anchor.ptr;
9272 log = s->log;
9273 } else if (s == NULL)
9274 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
9275 &pd, &a, &ruleset, &ip6intrq);
9276 break;
9277 }
9278
9279 done:
9280 #ifndef NO_APPLE_EXTENSIONS
9281 *m0 = pd.mp;
9282 PF_APPLE_UPDATE_PDESC_IPv6();
9283 #endif
9284
9285 if (n != m) {
9286 m_freem(n);
9287 n = NULL;
9288 }
9289
9290 /* handle dangerous IPv6 extension headers. */
9291 if (action == PF_PASS && rh_cnt &&
9292 !((s && s->allow_opts) || r->allow_opts)) {
9293 action = PF_DROP;
9294 REASON_SET(&reason, PFRES_IPOPTIONS);
9295 log = 1;
9296 DPFPRINTF(PF_DEBUG_MISC,
9297 ("pf: dropping packet with dangerous v6 headers\n"));
9298 }
9299
9300 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
9301 (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
9302 r->rtableid);
9303
9304 #if ALTQ
9305 if (action == PF_PASS && r->qid) {
9306 if (pd.tos & IPTOS_LOWDELAY)
9307 pd.pf_mtag->qid = r->pqid;
9308 else
9309 pd.pf_mtag->qid = r->qid;
9310 /* add hints for ecn */
9311 pd.pf_mtag->hdr = h;
9312 }
9313 #endif /* ALTQ */
9314
9315 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
9316 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
9317 (s->nat_rule.ptr->action == PF_RDR ||
9318 s->nat_rule.ptr->action == PF_BINAT) &&
9319 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
9320 pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;
9321
9322 if (log) {
9323 struct pf_rule *lr;
9324
9325 if (s != NULL && s->nat_rule.ptr != NULL &&
9326 s->nat_rule.ptr->log & PF_LOG_ALL)
9327 lr = s->nat_rule.ptr;
9328 else
9329 lr = r;
9330 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
9331 &pd);
9332 }
9333
9334 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
9335 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
9336
9337 if (action == PF_PASS || r->action == PF_DROP) {
9338 dirndx = (dir == PF_OUT);
9339 r->packets[dirndx]++;
9340 r->bytes[dirndx] += pd.tot_len;
9341 if (a != NULL) {
9342 a->packets[dirndx]++;
9343 a->bytes[dirndx] += pd.tot_len;
9344 }
9345 if (s != NULL) {
9346 sk = s->state_key;
9347 if (s->nat_rule.ptr != NULL) {
9348 s->nat_rule.ptr->packets[dirndx]++;
9349 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
9350 }
9351 if (s->src_node != NULL) {
9352 s->src_node->packets[dirndx]++;
9353 s->src_node->bytes[dirndx] += pd.tot_len;
9354 }
9355 if (s->nat_src_node != NULL) {
9356 s->nat_src_node->packets[dirndx]++;
9357 s->nat_src_node->bytes[dirndx] += pd.tot_len;
9358 }
9359 dirndx = (dir == sk->direction) ? 0 : 1;
9360 s->packets[dirndx]++;
9361 s->bytes[dirndx] += pd.tot_len;
9362 }
9363 tr = r;
9364 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
9365 if (nr != NULL) {
9366 struct pf_addr *x;
9367 /*
9368 * XXX: we need to make sure that the addresses
9369 * passed to pfr_update_stats() are the same as
9370 * the addresses used during matching (pfr_match)
9371 */
9372 if (r == &pf_default_rule) {
9373 tr = nr;
9374 x = (s == NULL || sk->direction == dir) ?
9375 &pd.baddr : &pd.naddr;
9376 } else {
9377 x = (s == NULL || sk->direction == dir) ?
9378 &pd.naddr : &pd.baddr;
9379 }
9380 if (x == &pd.baddr || s == NULL) {
9381 if (dir == PF_OUT)
9382 pd.src = x;
9383 else
9384 pd.dst = x;
9385 }
9386 }
9387 if (tr->src.addr.type == PF_ADDR_TABLE)
9388 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
9389 sk->direction == dir) ? pd.src : pd.dst, pd.af,
9390 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
9391 tr->src.neg);
9392 if (tr->dst.addr.type == PF_ADDR_TABLE)
9393 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
9394 sk->direction == dir) ? pd.dst : pd.src, pd.af,
9395 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
9396 tr->dst.neg);
9397 }
9398
9399 #if 0
9400 if (action == PF_SYNPROXY_DROP) {
9401 m_freem(*m0);
9402 *m0 = NULL;
9403 action = PF_PASS;
9404 } else if (r->rt)
9405 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9406 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9407 #else
9408 #ifndef NO_APPLE_EXTENSIONS
9409 VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
9410
9411 if (*m0) {
9412 if (pd.lmw < 0) {
9413 REASON_SET(&reason, PFRES_MEMORY);
9414 action = PF_DROP;
9415 }
9416
9417 if (action == PF_DROP) {
9418 m_freem(*m0);
9419 *m0 = NULL;
9420 return (PF_DROP);
9421 }
9422
9423 *m0 = m;
9424 }
9425
9426 if (action == PF_SYNPROXY_DROP) {
9427 m_freem(*m0);
9428 *m0 = NULL;
9429 action = PF_PASS;
9430 } else if (r->rt) {
9431 if (action == PF_PASS) {
9432 m = *m0;
9433 h = mtod(m, struct ip6_hdr *);
9434 }
9435
9436 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9437 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9438 }
9439 #else
9440 if (action != PF_SYNPROXY_DROP && r->rt)
9441 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9442 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9443
9444 if (action == PF_PASS) {
9445 m = *m0;
9446 h = mtod(m, struct ip6_hdr *);
9447 }
9448
9449 if (action == PF_SYNPROXY_DROP) {
9450 m_freem(*m0);
9451 *m0 = NULL;
9452 action = PF_PASS;
9453 }
9454 #endif
9455 #endif
9456
9457 return (action);
9458 }
9459 #endif /* INET6 */
9460
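/*
 * Congestion feedback is not implemented on this platform, so this check
 * always reports "not congested".
 */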
9461 static int
9462 pf_check_congestion(struct ifqueue *ifq)
9463 {
9464 #pragma unused(ifq)
9465 return (0);
9466 }
9467
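/*
 * The pool_*() routines below emulate the OpenBSD pool(9) allocator API on
 * top of XNU zones (zinit/zalloc/zfree) so that the pf code imported from
 * OpenBSD can keep using its pool-based allocations.  A minimal usage
 * sketch, matching the signatures defined here (example_pl and the limit
 * value are purely illustrative; pf's real pools are set up elsewhere):
 *
 *	struct pool example_pl;
 *	void *s;
 *
 *	pool_init(&example_pl, sizeof (struct pf_state), 0, 0, 0,
 *	    "examplepl", NULL);
 *	pool_sethardlimit(&example_pl, 10000, NULL, 0);
 *	s = pool_get(&example_pl, PR_WAITOK);
 *	if (s != NULL)
 *		pool_put(&example_pl, s);
 *	pool_destroy(&example_pl);
 */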
9468 void
9469 pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
9470 int flags, const char *wchan, void *palloc)
9471 {
9472 #pragma unused(align, ioff, flags, palloc)
9473 bzero(pp, sizeof (*pp));
9474 pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
9475 if (pp->pool_zone != NULL) {
9476 zone_change(pp->pool_zone, Z_EXPAND, TRUE);
9477 pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
9478 pp->pool_name = wchan;
9479 }
9480 }
9481
9482 /* Zones currently cannot be destroyed */
9483 void
9484 pool_destroy(struct pool *pp)
9485 {
9486 #pragma unused(pp)
9487 }
9488
9489 void
9490 pool_sethiwat(struct pool *pp, int n)
9491 {
9492 pp->pool_hiwat = n; /* Currently unused */
9493 }
9494
9495 void
9496 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
9497 {
9498 #pragma unused(warnmess, ratecap)
9499 pp->pool_limit = n;
9500 }
9501
9502 void *
9503 pool_get(struct pool *pp, int flags)
9504 {
9505 void *buf;
9506
9507 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9508
9509 if (pp->pool_count > pp->pool_limit) {
9510 DPFPRINTF(PF_DEBUG_NOISY,
9511 ("pf: pool %s hard limit reached (%d)\n",
9512 pp->pool_name != NULL ? pp->pool_name : "unknown",
9513 pp->pool_limit));
9514 pp->pool_fails++;
9515 return (NULL);
9516 }
9517
9518 buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
9519 if (buf != NULL) {
9520 pp->pool_count++;
9521 VERIFY(pp->pool_count != 0);
9522 }
9523 return (buf);
9524 }
9525
9526 void
9527 pool_put(struct pool *pp, void *v)
9528 {
9529 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9530
9531 zfree(pp->pool_zone, v);
9532 VERIFY(pp->pool_count != 0);
9533 pp->pool_count--;
9534 }
9535
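/*
 * pf per-packet metadata accessors.  Depending on PF_PKTHDR, the pf_mtag
 * either lives directly in the mbuf packet header or in an m_tag chained
 * to the mbuf; pf_find_mtag() only looks it up, while pf_get_mtag()
 * allocates and attaches the tag on demand.
 */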
9536 struct pf_mtag *
9537 pf_find_mtag(struct mbuf *m)
9538 {
9539 #if !PF_PKTHDR
9540 struct m_tag *mtag;
9541
9542 if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
9543 KERNEL_TAG_TYPE_PF, NULL)) == NULL)
9544 return (NULL);
9545
9546 return ((struct pf_mtag *)(mtag + 1));
9547 #else
9548 if (!(m->m_flags & M_PKTHDR))
9549 return (NULL);
9550
9551 return (&m->m_pkthdr.pf_mtag);
9552 #endif /* PF_PKTHDR */
9553 }
9554
9555 struct pf_mtag *
9556 pf_get_mtag(struct mbuf *m)
9557 {
9558 #if !PF_PKTHDR
9559 struct m_tag *mtag;
9560
9561 if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
9562 NULL)) == NULL) {
9563 mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
9564 sizeof (struct pf_mtag), M_NOWAIT);
9565 if (mtag == NULL)
9566 return (NULL);
9567 bzero(mtag + 1, sizeof (struct pf_mtag));
9568 m_tag_prepend(m, mtag);
9569 }
9570 return ((struct pf_mtag *)(mtag + 1));
9571 #else
9572 return (pf_find_mtag(m));
9573 #endif /* PF_PKTHDR */
9574 }
9575
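/*
 * pf uses two clocks: pf_time_second() returns monotonic uptime seconds
 * (microuptime), while pf_calendar_time_second() returns wall-clock
 * seconds (microtime).
 */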
9576 uint64_t
9577 pf_time_second(void)
9578 {
9579 struct timeval t;
9580
9581 microuptime(&t);
9582 return (t.tv_sec);
9583 }
9584
9585 uint64_t
9586 pf_calendar_time_second(void)
9587 {
9588 struct timeval t;
9589
9590 microtime(&t);
9591 return (t.tv_sec);
9592 }
9593
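/*
 * Minimal callback-hook helpers: hook_establish() allocates a descriptor
 * and inserts it at the head or tail of the given list, and hook_runloop()
 * invokes the registered callbacks, optionally removing and/or freeing the
 * descriptors according to the HOOK_REMOVE/HOOK_ABORT/HOOK_FREE flags.
 */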
9594 static void *
9595 hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
9596 {
9597 struct hook_desc *hd;
9598
9599 hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
9600 if (hd == NULL)
9601 return (NULL);
9602
9603 hd->hd_fn = fn;
9604 hd->hd_arg = arg;
9605 if (tail)
9606 TAILQ_INSERT_TAIL(head, hd, hd_list);
9607 else
9608 TAILQ_INSERT_HEAD(head, hd, hd_list);
9609
9610 return (hd);
9611 }
9612
9613 static void
9614 hook_runloop(struct hook_desc_head *head, int flags)
9615 {
9616 struct hook_desc *hd;
9617
9618 if (!(flags & HOOK_REMOVE)) {
9619 if (!(flags & HOOK_ABORT))
9620 TAILQ_FOREACH(hd, head, hd_list)
9621 hd->hd_fn(hd->hd_arg);
9622 } else {
9623 while (!!(hd = TAILQ_FIRST(head))) {
9624 TAILQ_REMOVE(head, hd, hd_list);
9625 if (!(flags & HOOK_ABORT))
9626 hd->hd_fn(hd->hd_arg);
9627 if (flags & HOOK_FREE)
9628 _FREE(hd, M_DEVBUF);
9629 }
9630 }
9631 }