1 /*
2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
30 /* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67 #include <machine/endian.h>
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/mbuf.h>
71 #include <sys/filio.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/kernel.h>
75 #include <sys/time.h>
76 #include <sys/proc.h>
77 #include <sys/random.h>
78 #include <sys/mcache.h>
79
80 #include <libkern/crypto/md5.h>
81 #include <libkern/libkern.h>
82
83 #include <mach/thread_act.h>
84
85 #include <net/if.h>
86 #include <net/if_types.h>
87 #include <net/bpf.h>
88 #include <net/route.h>
89
90 #include <netinet/in.h>
91 #include <netinet/in_var.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/ip.h>
94 #include <netinet/ip_var.h>
95 #include <netinet/tcp.h>
96 #include <netinet/tcp_seq.h>
97 #include <netinet/udp.h>
98 #include <netinet/ip_icmp.h>
99 #include <netinet/in_pcb.h>
100 #include <netinet/tcp_timer.h>
101 #include <netinet/tcp_var.h>
102 #include <netinet/tcp_fsm.h>
103 #include <netinet/udp_var.h>
104 #include <netinet/icmp_var.h>
105 #include <net/if_ether.h>
106 #include <net/ethernet.h>
107
108 #include <net/pfvar.h>
109 #include <net/if_pflog.h>
110
111 #if NPFSYNC
112 #include <net/if_pfsync.h>
113 #endif /* NPFSYNC */
114
115 #if INET6
116 #include <netinet/ip6.h>
117 #include <netinet6/in6_pcb.h>
118 #include <netinet6/ip6_var.h>
119 #include <netinet/icmp6.h>
120 #include <netinet6/nd6.h>
121 #endif /* INET6 */
122
123 #ifndef NO_APPLE_EXTENSIONS
124 #define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0))
125 #else
126 #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
127 #endif
128
129 /* XXX: should be in header somewhere */
130 #define satosin(sa) ((struct sockaddr_in *)(sa))
131 #define sintosa(sin) ((struct sockaddr *)(sin))
132
133 /*
134 * On Mac OS X, the rtableid value is treated as the interface scope
135 * value that is equivalent to the interface index used for scoped
136 * routing. A valid scope value is anything but IFSCOPE_NONE (0),
137 * as per definition of ifindex which is a positive, non-zero number.
138 * The other BSDs treat a negative rtableid value as invalid, hence
139 * the test against INT_MAX to handle userland apps which initialize
140 * the field with a negative number.
141 */
142 #define PF_RTABLEID_IS_VALID(r) \
143 ((r) > IFSCOPE_NONE && (r) <= INT_MAX)
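/*
 * For example: rtableid 0 (IFSCOPE_NONE) fails the lower bound; a
 * negative value such as -1 from userland is rejected either way (as a
 * signed value it fails the lower bound, and widened to unsigned it
 * exceeds INT_MAX); an ordinary interface index such as 4 passes and is
 * used as the routing scope.  Values here are illustrative only.
 */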
144
145 /*
146 * Global variables
147 */
148 lck_mtx_t *pf_lock;
149 lck_rw_t *pf_perim_lock;
150
151 /* state tables */
152 struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
153 struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;
154
155 struct pf_palist pf_pabuf;
156 struct pf_status pf_status;
157
158 #if ALTQ
159 struct pf_altqqueue pf_altqs[2];
160 struct pf_altqqueue *pf_altqs_active;
161 struct pf_altqqueue *pf_altqs_inactive;
162 u_int32_t ticket_altqs_active;
163 u_int32_t ticket_altqs_inactive;
164 int altqs_inactive_open;
165 #endif /* ALTQ */
166 u_int32_t ticket_pabuf;
167
168 static MD5_CTX pf_tcp_secret_ctx;
169 static u_char pf_tcp_secret[16];
170 static int pf_tcp_secret_init;
171 static int pf_tcp_iss_off;
172
173 static struct pf_anchor_stackframe {
174 struct pf_ruleset *rs;
175 struct pf_rule *r;
176 struct pf_anchor_node *parent;
177 struct pf_anchor *child;
178 } pf_anchor_stack[64];
179
180 struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
181 struct pool pf_state_pl, pf_state_key_pl;
182 #if ALTQ
183 struct pool pf_altq_pl;
184 #endif /* ALTQ */
185
186 #ifndef NO_APPLE_EXTENSIONS
187 typedef void (*hook_fn_t)(void *);
188
189 struct hook_desc {
190 TAILQ_ENTRY(hook_desc) hd_list;
191 hook_fn_t hd_fn;
192 void *hd_arg;
193 };
194
195 #define HOOK_REMOVE 0x01
196 #define HOOK_FREE 0x02
197 #define HOOK_ABORT 0x04
198
199 static void *hook_establish(struct hook_desc_head *, int,
200 hook_fn_t, void *);
201 static void hook_runloop(struct hook_desc_head *, int flags);
202
203 struct pool pf_app_state_pl;
204 static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
205 static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
206 u_int8_t);
207 #endif
208
209 static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
210
211 static void pf_init_threshold(struct pf_threshold *, u_int32_t,
212 u_int32_t);
213 static void pf_add_threshold(struct pf_threshold *);
214 static int pf_check_threshold(struct pf_threshold *);
215
216 static void pf_change_ap(int, struct mbuf *, struct pf_addr *,
217 u_int16_t *, u_int16_t *, u_int16_t *,
218 struct pf_addr *, u_int16_t, u_int8_t, sa_family_t);
219 static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
220 struct tcphdr *, struct pf_state_peer *);
221 #if INET6
222 static void pf_change_a6(struct pf_addr *, u_int16_t *,
223 struct pf_addr *, u_int8_t);
224 #endif /* INET6 */
225 static void pf_change_icmp(struct pf_addr *, u_int16_t *,
226 struct pf_addr *, struct pf_addr *, u_int16_t,
227 u_int16_t *, u_int16_t *, u_int16_t *,
228 u_int16_t *, u_int8_t, sa_family_t);
229 static void pf_send_tcp(const struct pf_rule *, sa_family_t,
230 const struct pf_addr *, const struct pf_addr *,
231 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
232 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
233 u_int16_t, struct ether_header *, struct ifnet *);
234 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
235 sa_family_t, struct pf_rule *);
236 #ifndef NO_APPLE_EXTENSIONS
237 static struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
238 int, int, struct pfi_kif *, struct pf_addr *,
239 union pf_state_xport *, struct pf_addr *,
240 union pf_state_xport *, int);
241 static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
242 struct mbuf *, int, int, struct pfi_kif *,
243 struct pf_src_node **, struct pf_addr *,
244 union pf_state_xport *, struct pf_addr *,
245 union pf_state_xport *, struct pf_addr *,
246 union pf_state_xport *);
247 #else
248 struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
249 int, int, struct pfi_kif *,
250 struct pf_addr *, u_int16_t, struct pf_addr *,
251 u_int16_t, int);
252 struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
253 int, int, struct pfi_kif *, struct pf_src_node **,
254 struct pf_addr *, u_int16_t,
255 struct pf_addr *, u_int16_t,
256 struct pf_addr *, u_int16_t *);
257 #endif
258 static void pf_attach_state(struct pf_state_key *,
259 struct pf_state *, int);
260 static void pf_detach_state(struct pf_state *, int);
261 static u_int32_t pf_tcp_iss(struct pf_pdesc *);
262 static int pf_test_rule(struct pf_rule **, struct pf_state **,
263 int, struct pfi_kif *, struct mbuf *, int,
264 void *, struct pf_pdesc *, struct pf_rule **,
265 struct pf_ruleset **, struct ifqueue *);
266 static int pf_test_fragment(struct pf_rule **, int,
267 struct pfi_kif *, struct mbuf *, void *,
268 struct pf_pdesc *, struct pf_rule **,
269 struct pf_ruleset **);
270 static int pf_test_state_tcp(struct pf_state **, int,
271 struct pfi_kif *, struct mbuf *, int,
272 void *, struct pf_pdesc *, u_short *);
273 #ifndef NO_APPLE_EXTENSIONS
274 static int pf_test_state_udp(struct pf_state **, int,
275 struct pfi_kif *, struct mbuf *, int,
276 void *, struct pf_pdesc *, u_short *);
277 #else
278 static int pf_test_state_udp(struct pf_state **, int,
279 struct pfi_kif *, struct mbuf *, int,
280 void *, struct pf_pdesc *);
281 #endif
282 static int pf_test_state_icmp(struct pf_state **, int,
283 struct pfi_kif *, struct mbuf *, int,
284 void *, struct pf_pdesc *, u_short *);
285 static int pf_test_state_other(struct pf_state **, int,
286 struct pfi_kif *, struct pf_pdesc *);
287 static int pf_match_tag(struct mbuf *, struct pf_rule *,
288 struct pf_mtag *, int *);
289 static void pf_hash(struct pf_addr *, struct pf_addr *,
290 struct pf_poolhashkey *, sa_family_t);
291 static int pf_map_addr(u_int8_t, struct pf_rule *,
292 struct pf_addr *, struct pf_addr *,
293 struct pf_addr *, struct pf_src_node **);
294 #ifndef NO_APPLE_EXTENSIONS
295 static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
296 struct pf_rule *, struct pf_addr *,
297 union pf_state_xport *, struct pf_addr *,
298 union pf_state_xport *, struct pf_addr *,
299 union pf_state_xport *, struct pf_src_node **);
300 #else
301 int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
302 struct pf_addr *, struct pf_addr *, u_int16_t,
303 struct pf_addr *, u_int16_t *, u_int16_t, u_int16_t,
304 struct pf_src_node **);
305 #endif
306 static void pf_route(struct mbuf **, struct pf_rule *, int,
307 struct ifnet *, struct pf_state *,
308 struct pf_pdesc *);
309 #if INET6
310 static void pf_route6(struct mbuf **, struct pf_rule *, int,
311 struct ifnet *, struct pf_state *,
312 struct pf_pdesc *);
313 #endif /* INET6 */
314 static u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
315 sa_family_t);
316 static u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
317 sa_family_t);
318 static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
319 u_int16_t);
320 static void pf_set_rt_ifp(struct pf_state *,
321 struct pf_addr *);
322 static int pf_check_proto_cksum(struct mbuf *, int, int,
323 u_int8_t, sa_family_t);
324 static int pf_addr_wrap_neq(struct pf_addr_wrap *,
325 struct pf_addr_wrap *);
326 static struct pf_state *pf_find_state(struct pfi_kif *,
327 struct pf_state_key_cmp *, u_int);
328 static int pf_src_connlimit(struct pf_state **);
329 static void pf_stateins_err(const char *, struct pf_state *,
330 struct pfi_kif *);
331 static int pf_check_congestion(struct ifqueue *);
332
333 #ifndef NO_APPLE_EXTENSIONS
334 #if 0
335 static const char *pf_pptp_ctrl_type_name(u_int16_t code);
336 #endif
337 static void pf_pptp_handler(struct pf_state *, int, int,
338 struct pf_pdesc *, struct pfi_kif *);
339 static void pf_pptp_unlink(struct pf_state *);
340 static void pf_grev1_unlink(struct pf_state *);
341 static int pf_test_state_grev1(struct pf_state **, int,
342 struct pfi_kif *, int, struct pf_pdesc *);
343 static int pf_ike_compare(struct pf_app_state *,
344 struct pf_app_state *);
345 static int pf_test_state_esp(struct pf_state **, int,
346 struct pfi_kif *, int, struct pf_pdesc *);
347 #endif
348
349 extern struct pool pfr_ktable_pl;
350 extern struct pool pfr_kentry_pl;
351 extern int path_mtu_discovery;
352
353 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
354 { &pf_state_pl, PFSTATE_HIWAT },
355 { &pf_app_state_pl, PFAPPSTATE_HIWAT },
356 { &pf_src_tree_pl, PFSNODE_HIWAT },
357 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
358 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
359 { &pfr_kentry_pl, PFR_KENTRY_HIWAT }
360 };
361
362 #ifndef NO_APPLE_EXTENSIONS
363 struct mbuf *
364 pf_lazy_makewritable(struct pf_pdesc *pd, struct mbuf *m, int len)
365 {
366 if (pd->lmw < 0)
367 return (0);
368
369 VERIFY(m == pd->mp);
370
371 if (len > pd->lmw) {
372 if (m_makewritable(&m, 0, len, M_DONTWAIT))
373 len = -1;
374 pd->lmw = len;
375 if (len >= 0 && m != pd->mp) {
376 pd->mp = m;
377 pd->pf_mtag = pf_find_mtag(m);
378
379 switch (pd->af) {
380 case AF_INET: {
381 struct ip *h = mtod(m, struct ip *);
382 pd->src = (struct pf_addr *)&h->ip_src;
383 pd->dst = (struct pf_addr *)&h->ip_dst;
384 pd->ip_sum = &h->ip_sum;
385 break;
386 }
387 #if INET6
388 case AF_INET6: {
389 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
390 pd->src = (struct pf_addr *)&h->ip6_src;
391 pd->dst = (struct pf_addr *)&h->ip6_dst;
392 break;
393 }
394 #endif /* INET6 */
395 }
396 }
397 }
398
399 return (len < 0 ? 0 : m);
400 }
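/*
 * Sketch of the intended calling convention for pf_lazy_makewritable(),
 * inferred from the logic above rather than copied from a real caller:
 * pd->lmw caches how many leading bytes have already been made
 * writable, so repeated calls only pay for m_makewritable() when a
 * larger prefix is needed, and a negative pd->lmw records an earlier
 * failure so later calls fail fast.  With "off" and "th" standing in
 * for a caller's header offset and TCP header pointer:
 *
 *	m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*th));
 *	if (m == NULL)
 *		return (PF_DROP);
 */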
401
402 static const int *
403 pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
404 int direction, int *action)
405 {
406 if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
407 *action = PF_DROP;
408 return (action);
409 }
410
411 if (direction == PF_OUT &&
412 (((*state)->rule.ptr->rt == PF_ROUTETO &&
413 (*state)->rule.ptr->direction == PF_OUT) ||
414 ((*state)->rule.ptr->rt == PF_REPLYTO &&
415 (*state)->rule.ptr->direction == PF_IN)) &&
416 (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
417 *action = PF_PASS;
418 return (action);
419 }
420
421 return (0);
422 }
423
424 #define STATE_LOOKUP() \
425 do { \
426 int action; \
427 *state = pf_find_state(kif, &key, direction); \
428 if (pf_state_lookup_aux(state, kif, direction, &action)) \
429 return (action); \
430 } while (0)
431
432 #define STATE_ADDR_TRANSLATE(sk) \
433 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
434 ((sk)->af == AF_INET6 && \
435 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
436 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
437 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))
438
439 #define STATE_TRANSLATE(sk) \
440 (STATE_ADDR_TRANSLATE(sk) || \
441 (sk)->lan.xport.port != (sk)->gwy.xport.port)
442
443 #define STATE_GRE_TRANSLATE(sk) \
444 (STATE_ADDR_TRANSLATE(sk) || \
445 (sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)
446
447 #else
448 #define STATE_LOOKUP() \
449 do { \
450 *state = pf_find_state(kif, &key, direction); \
451 if (*state == NULL || (*state)->timeout == PFTM_PURGE) \
452 return (PF_DROP); \
453 if (direction == PF_OUT && \
454 (((*state)->rule.ptr->rt == PF_ROUTETO && \
455 (*state)->rule.ptr->direction == PF_OUT) || \
456 ((*state)->rule.ptr->rt == PF_REPLYTO && \
457 (*state)->rule.ptr->direction == PF_IN)) && \
458 (*state)->rt_kif != NULL && \
459 (*state)->rt_kif != kif) \
460 return (PF_PASS); \
461 } while (0)
462
463 #define STATE_TRANSLATE(sk) \
464 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
465 ((sk)->af == AF_INET6 && \
466 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
467 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
468 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])) || \
469 (sk)->lan.port != (sk)->gwy.port
470 #endif
471
472 #define BOUND_IFACE(r, k) \
473 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
474
475 #define STATE_INC_COUNTERS(s) \
476 do { \
477 s->rule.ptr->states++; \
478 VERIFY(s->rule.ptr->states != 0); \
479 if (s->anchor.ptr != NULL) { \
480 s->anchor.ptr->states++; \
481 VERIFY(s->anchor.ptr->states != 0); \
482 } \
483 if (s->nat_rule.ptr != NULL) { \
484 s->nat_rule.ptr->states++; \
485 VERIFY(s->nat_rule.ptr->states != 0); \
486 } \
487 } while (0)
488
489 #define STATE_DEC_COUNTERS(s) \
490 do { \
491 if (s->nat_rule.ptr != NULL) { \
492 VERIFY(s->nat_rule.ptr->states > 0); \
493 s->nat_rule.ptr->states--; \
494 } \
495 if (s->anchor.ptr != NULL) { \
496 VERIFY(s->anchor.ptr->states > 0); \
497 s->anchor.ptr->states--; \
498 } \
499 VERIFY(s->rule.ptr->states > 0); \
500 s->rule.ptr->states--; \
501 } while (0)
502
503 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
504 static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
505 struct pf_state_key *);
506 static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
507 struct pf_state_key *);
508 static __inline int pf_state_compare_id(struct pf_state *,
509 struct pf_state *);
510
511 struct pf_src_tree tree_src_tracking;
512
513 struct pf_state_tree_id tree_id;
514 struct pf_state_queue state_list;
515
516 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
517 RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
518 entry_lan_ext, pf_state_compare_lan_ext);
519 RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
520 entry_ext_gwy, pf_state_compare_ext_gwy);
521 RB_GENERATE(pf_state_tree_id, pf_state,
522 entry_id, pf_state_compare_id);
523
524 #define PF_DT_SKIP_LANEXT 0x01
525 #define PF_DT_SKIP_EXTGWY 0x02
526
527 #ifndef NO_APPLE_EXTENSIONS
528 static const u_int16_t PF_PPTP_PORT = 1723;
529 static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
530
531 struct pf_pptp_hdr {
532 u_int16_t length;
533 u_int16_t type;
534 u_int32_t magic;
535 };
536
537 struct pf_pptp_ctrl_hdr {
538 u_int16_t type;
539 u_int16_t reserved_0;
540 };
541
542 struct pf_pptp_ctrl_generic {
543 u_int16_t data[0];
544 };
545
546 #define PF_PPTP_CTRL_TYPE_START_REQ 1
547 struct pf_pptp_ctrl_start_req {
548 u_int16_t protocol_version;
549 u_int16_t reserved_1;
550 u_int32_t framing_capabilities;
551 u_int32_t bearer_capabilities;
552 u_int16_t maximum_channels;
553 u_int16_t firmware_revision;
554 u_int8_t host_name[64];
555 u_int8_t vendor_string[64];
556 };
557
558 #define PF_PPTP_CTRL_TYPE_START_RPY 2
559 struct pf_pptp_ctrl_start_rpy {
560 u_int16_t protocol_version;
561 u_int8_t result_code;
562 u_int8_t error_code;
563 u_int32_t framing_capabilities;
564 u_int32_t bearer_capabilities;
565 u_int16_t maximum_channels;
566 u_int16_t firmware_revision;
567 u_int8_t host_name[64];
568 u_int8_t vendor_string[64];
569 };
570
571 #define PF_PPTP_CTRL_TYPE_STOP_REQ 3
572 struct pf_pptp_ctrl_stop_req {
573 u_int8_t reason;
574 u_int8_t reserved_1;
575 u_int16_t reserved_2;
576 };
577
578 #define PF_PPTP_CTRL_TYPE_STOP_RPY 4
579 struct pf_pptp_ctrl_stop_rpy {
580 u_int8_t reason;
581 u_int8_t error_code;
582 u_int16_t reserved_1;
583 };
584
585 #define PF_PPTP_CTRL_TYPE_ECHO_REQ 5
586 struct pf_pptp_ctrl_echo_req {
587 u_int32_t identifier;
588 };
589
590 #define PF_PPTP_CTRL_TYPE_ECHO_RPY 6
591 struct pf_pptp_ctrl_echo_rpy {
592 u_int32_t identifier;
593 u_int8_t result_code;
594 u_int8_t error_code;
595 u_int16_t reserved_1;
596 };
597
598 #define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7
599 struct pf_pptp_ctrl_call_out_req {
600 u_int16_t call_id;
601 u_int16_t call_sernum;
602 u_int32_t min_bps;
603 u_int32_t bearer_type;
604 u_int32_t framing_type;
605 u_int16_t rxwindow_size;
606 u_int16_t proc_delay;
607 u_int8_t phone_num[64];
608 u_int8_t sub_addr[64];
609 };
610
611 #define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8
612 struct pf_pptp_ctrl_call_out_rpy {
613 u_int16_t call_id;
614 u_int16_t peer_call_id;
615 u_int8_t result_code;
616 u_int8_t error_code;
617 u_int16_t cause_code;
618 u_int32_t connect_speed;
619 u_int16_t rxwindow_size;
620 u_int16_t proc_delay;
621 u_int32_t phy_channel_id;
622 };
623
624 #define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9
625 struct pf_pptp_ctrl_call_in_1st {
626 u_int16_t call_id;
627 u_int16_t call_sernum;
628 u_int32_t bearer_type;
629 u_int32_t phy_channel_id;
630 u_int16_t dialed_number_len;
631 u_int16_t dialing_number_len;
632 u_int8_t dialed_num[64];
633 u_int8_t dialing_num[64];
634 u_int8_t sub_addr[64];
635 };
636
637 #define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10
638 struct pf_pptp_ctrl_call_in_2nd {
639 u_int16_t call_id;
640 u_int16_t peer_call_id;
641 u_int8_t result_code;
642 u_int8_t error_code;
643 u_int16_t rxwindow_size;
644 u_int16_t txdelay;
645 u_int16_t reserved_1;
646 };
647
648 #define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11
649 struct pf_pptp_ctrl_call_in_3rd {
650 u_int16_t call_id;
651 u_int16_t reserved_1;
652 u_int32_t connect_speed;
653 u_int16_t rxwindow_size;
654 u_int16_t txdelay;
655 u_int32_t framing_type;
656 };
657
658 #define PF_PPTP_CTRL_TYPE_CALL_CLR 12
659 struct pf_pptp_ctrl_call_clr {
660 u_int16_t call_id;
661 u_int16_t reserved_1;
662 };
663
664 #define PF_PPTP_CTRL_TYPE_CALL_DISC 13
665 struct pf_pptp_ctrl_call_disc {
666 u_int16_t call_id;
667 u_int8_t result_code;
668 u_int8_t error_code;
669 u_int16_t cause_code;
670 u_int16_t reserved_1;
671 u_int8_t statistics[128];
672 };
673
674 #define PF_PPTP_CTRL_TYPE_ERROR 14
675 struct pf_pptp_ctrl_error {
676 u_int16_t peer_call_id;
677 u_int16_t reserved_1;
678 u_int32_t crc_errors;
679 u_int32_t fr_errors;
680 u_int32_t hw_errors;
681 u_int32_t buf_errors;
682 u_int32_t tim_errors;
683 u_int32_t align_errors;
684 };
685
686 #define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15
687 struct pf_pptp_ctrl_set_linkinfo {
688 u_int16_t peer_call_id;
689 u_int16_t reserved_1;
690 u_int32_t tx_accm;
691 u_int32_t rx_accm;
692 };
693
694 #if 0
695 static const char *pf_pptp_ctrl_type_name(u_int16_t code)
696 {
697 code = ntohs(code);
698
699 if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
700 code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
701 static char reserved[] = "reserved-00";
702
703 sprintf(&reserved[9], "%02x", code);
704 return (reserved);
705 } else {
706 static const char *name[] = {
707 "start_req", "start_rpy", "stop_req", "stop_rpy",
708 "echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
709 "call_in_1st", "call_in_2nd", "call_in_3rd",
710 "call_clr", "call_disc", "error", "set_linkinfo"
711 };
712
713 return (name[code - 1]);
714 }
715 };
716 #endif
717
718 static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
719 sizeof (struct pf_pptp_hdr) +
720 sizeof (struct pf_pptp_ctrl_hdr) +
721 MIN(sizeof (struct pf_pptp_ctrl_start_req),
722 MIN(sizeof (struct pf_pptp_ctrl_start_rpy),
723 MIN(sizeof (struct pf_pptp_ctrl_stop_req),
724 MIN(sizeof (struct pf_pptp_ctrl_stop_rpy),
725 MIN(sizeof (struct pf_pptp_ctrl_echo_req),
726 MIN(sizeof (struct pf_pptp_ctrl_echo_rpy),
727 MIN(sizeof (struct pf_pptp_ctrl_call_out_req),
728 MIN(sizeof (struct pf_pptp_ctrl_call_out_rpy),
729 MIN(sizeof (struct pf_pptp_ctrl_call_in_1st),
730 MIN(sizeof (struct pf_pptp_ctrl_call_in_2nd),
731 MIN(sizeof (struct pf_pptp_ctrl_call_in_3rd),
732 MIN(sizeof (struct pf_pptp_ctrl_call_clr),
733 MIN(sizeof (struct pf_pptp_ctrl_call_disc),
734 MIN(sizeof (struct pf_pptp_ctrl_error),
735 sizeof (struct pf_pptp_ctrl_set_linkinfo)
736 ))))))))))))));
737
738 union pf_pptp_ctrl_msg_union {
739 struct pf_pptp_ctrl_start_req start_req;
740 struct pf_pptp_ctrl_start_rpy start_rpy;
741 struct pf_pptp_ctrl_stop_req stop_req;
742 struct pf_pptp_ctrl_stop_rpy stop_rpy;
743 struct pf_pptp_ctrl_echo_req echo_req;
744 struct pf_pptp_ctrl_echo_rpy echo_rpy;
745 struct pf_pptp_ctrl_call_out_req call_out_req;
746 struct pf_pptp_ctrl_call_out_rpy call_out_rpy;
747 struct pf_pptp_ctrl_call_in_1st call_in_1st;
748 struct pf_pptp_ctrl_call_in_2nd call_in_2nd;
749 struct pf_pptp_ctrl_call_in_3rd call_in_3rd;
750 struct pf_pptp_ctrl_call_clr call_clr;
751 struct pf_pptp_ctrl_call_disc call_disc;
752 struct pf_pptp_ctrl_error error;
753 struct pf_pptp_ctrl_set_linkinfo set_linkinfo;
754 u_int8_t data[0];
755 };
756
757 struct pf_pptp_ctrl_msg {
758 struct pf_pptp_hdr hdr;
759 struct pf_pptp_ctrl_hdr ctrl;
760 union pf_pptp_ctrl_msg_union msg;
761 };
762
763 #define PF_GRE_FLAG_CHECKSUM_PRESENT 0x8000
764 #define PF_GRE_FLAG_VERSION_MASK 0x0007
765 #define PF_GRE_PPP_ETHERTYPE 0x880B
766
767 struct pf_grev1_hdr {
768 u_int16_t flags;
769 u_int16_t protocol_type;
770 u_int16_t payload_length;
771 u_int16_t call_id;
772 /*
773 u_int32_t seqno;
774 u_int32_t ackno;
775 */
776 };
777
778 static const u_int16_t PF_IKE_PORT = 500;
779
780 struct pf_ike_hdr {
781 u_int64_t initiator_cookie, responder_cookie;
782 u_int8_t next_payload, version, exchange_type, flags;
783 u_int32_t message_id, length;
784 };
785
786 #define PF_IKE_PACKET_MINSIZE (sizeof (struct pf_ike_hdr))
787
788 #define PF_IKEv1_EXCHTYPE_BASE 1
789 #define PF_IKEv1_EXCHTYPE_ID_PROTECT 2
790 #define PF_IKEv1_EXCHTYPE_AUTH_ONLY 3
791 #define PF_IKEv1_EXCHTYPE_AGGRESSIVE 4
792 #define PF_IKEv1_EXCHTYPE_INFORMATIONAL 5
793 #define PF_IKEv2_EXCHTYPE_SA_INIT 34
794 #define PF_IKEv2_EXCHTYPE_AUTH 35
795 #define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36
796 #define PF_IKEv2_EXCHTYPE_INFORMATIONAL 37
797
798 #define PF_IKEv1_FLAG_E 0x01
799 #define PF_IKEv1_FLAG_C 0x02
800 #define PF_IKEv1_FLAG_A 0x04
801 #define PF_IKEv2_FLAG_I 0x08
802 #define PF_IKEv2_FLAG_V 0x10
803 #define PF_IKEv2_FLAG_R 0x20
804
805 struct pf_esp_hdr {
806 u_int32_t spi;
807 u_int32_t seqno;
808 u_int8_t payload[];
809 };
810 #endif
811
812 static __inline int
813 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
814 {
815 int diff;
816
817 if (a->rule.ptr > b->rule.ptr)
818 return (1);
819 if (a->rule.ptr < b->rule.ptr)
820 return (-1);
821 if ((diff = a->af - b->af) != 0)
822 return (diff);
823 switch (a->af) {
824 #if INET
825 case AF_INET:
826 if (a->addr.addr32[0] > b->addr.addr32[0])
827 return (1);
828 if (a->addr.addr32[0] < b->addr.addr32[0])
829 return (-1);
830 break;
831 #endif /* INET */
832 #if INET6
833 case AF_INET6:
834 if (a->addr.addr32[3] > b->addr.addr32[3])
835 return (1);
836 if (a->addr.addr32[3] < b->addr.addr32[3])
837 return (-1);
838 if (a->addr.addr32[2] > b->addr.addr32[2])
839 return (1);
840 if (a->addr.addr32[2] < b->addr.addr32[2])
841 return (-1);
842 if (a->addr.addr32[1] > b->addr.addr32[1])
843 return (1);
844 if (a->addr.addr32[1] < b->addr.addr32[1])
845 return (-1);
846 if (a->addr.addr32[0] > b->addr.addr32[0])
847 return (1);
848 if (a->addr.addr32[0] < b->addr.addr32[0])
849 return (-1);
850 break;
851 #endif /* INET6 */
852 }
853 return (0);
854 }
855
856 static __inline int
857 pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b)
858 {
859 int diff;
860 #ifndef NO_APPLE_EXTENSIONS
861 int extfilter;
862 #endif
863
864 if ((diff = a->proto - b->proto) != 0)
865 return (diff);
866 if ((diff = a->af - b->af) != 0)
867 return (diff);
868
869 #ifndef NO_APPLE_EXTENSIONS
870 extfilter = PF_EXTFILTER_APD;
871
872 switch (a->proto) {
873 case IPPROTO_ICMP:
874 case IPPROTO_ICMPV6:
875 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
876 return (diff);
877 break;
878
879 case IPPROTO_TCP:
880 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
881 return (diff);
882 if ((diff = a->ext.xport.port - b->ext.xport.port) != 0)
883 return (diff);
884 break;
885
886 case IPPROTO_UDP:
887 if ((diff = a->proto_variant - b->proto_variant))
888 return (diff);
889 extfilter = a->proto_variant;
890 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
891 return (diff);
892 if ((extfilter < PF_EXTFILTER_AD) &&
893 (diff = a->ext.xport.port - b->ext.xport.port) != 0)
894 return (diff);
895 break;
896
897 case IPPROTO_GRE:
898 if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
899 a->proto_variant == b->proto_variant) {
900 if (!!(diff = a->ext.xport.call_id -
901 b->ext.xport.call_id))
902 return (diff);
903 }
904 break;
905
906 case IPPROTO_ESP:
907 if (!!(diff = a->ext.xport.spi - b->ext.xport.spi))
908 return (diff);
909 break;
910
911 default:
912 break;
913 }
914 #endif
915
916 switch (a->af) {
917 #if INET
918 case AF_INET:
919 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
920 return (1);
921 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
922 return (-1);
923 #ifndef NO_APPLE_EXTENSIONS
924 if (extfilter < PF_EXTFILTER_EI) {
925 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
926 return (1);
927 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
928 return (-1);
929 }
930 #else
931 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
932 return (1);
933 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
934 return (-1);
935 #endif
936 break;
937 #endif /* INET */
938 #if INET6
939 case AF_INET6:
940 #ifndef NO_APPLE_EXTENSIONS
941 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
942 return (1);
943 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
944 return (-1);
945 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
946 return (1);
947 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
948 return (-1);
949 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
950 return (1);
951 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
952 return (-1);
953 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
954 return (1);
955 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
956 return (-1);
957 if (extfilter < PF_EXTFILTER_EI ||
958 !PF_AZERO(&b->ext.addr, AF_INET6)) {
959 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
960 return (1);
961 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
962 return (-1);
963 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
964 return (1);
965 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
966 return (-1);
967 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
968 return (1);
969 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
970 return (-1);
971 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
972 return (1);
973 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
974 return (-1);
975 }
976 #else
977 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
978 return (1);
979 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
980 return (-1);
981 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
982 return (1);
983 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
984 return (-1);
985 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
986 return (1);
987 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
988 return (-1);
989 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
990 return (1);
991 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
992 return (-1);
993 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
994 return (1);
995 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
996 return (-1);
997 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
998 return (1);
999 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
1000 return (-1);
1001 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
1002 return (1);
1003 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
1004 return (-1);
1005 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1006 return (1);
1007 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1008 return (-1);
1009 #endif
1010 break;
1011 #endif /* INET6 */
1012 }
1013
1014 #ifndef NO_APPLE_EXTENSIONS
1015 if (a->app_state && b->app_state) {
1016 if (a->app_state->compare_lan_ext &&
1017 b->app_state->compare_lan_ext) {
1018 diff = (const char *)b->app_state->compare_lan_ext -
1019 (const char *)a->app_state->compare_lan_ext;
1020 if (diff != 0)
1021 return (diff);
1022 diff = a->app_state->compare_lan_ext(a->app_state,
1023 b->app_state);
1024 if (diff != 0)
1025 return (diff);
1026 }
1027 }
1028 #else
1029 if ((diff = a->lan.port - b->lan.port) != 0)
1030 return (diff);
1031 if ((diff = a->ext.port - b->ext.port) != 0)
1032 return (diff);
1033 #endif
1034
1035 return (0);
1036 }
1037
1038 static __inline int
1039 pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b)
1040 {
1041 int diff;
1042 #ifndef NO_APPLE_EXTENSIONS
1043 int extfilter;
1044 #endif
1045
1046 if ((diff = a->proto - b->proto) != 0)
1047 return (diff);
1048
1049 if ((diff = a->af - b->af) != 0)
1050 return (diff);
1051
1052 #ifndef NO_APPLE_EXTENSIONS
1053 extfilter = PF_EXTFILTER_APD;
1054
1055 switch (a->proto) {
1056 case IPPROTO_ICMP:
1057 case IPPROTO_ICMPV6:
1058 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
1059 return (diff);
1060 break;
1061
1062 case IPPROTO_TCP:
1063 if ((diff = a->ext.xport.port - b->ext.xport.port) != 0)
1064 return (diff);
1065 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
1066 return (diff);
1067 break;
1068
1069 case IPPROTO_UDP:
1070 if ((diff = a->proto_variant - b->proto_variant))
1071 return (diff);
1072 extfilter = a->proto_variant;
1073 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
1074 return (diff);
1075 if ((extfilter < PF_EXTFILTER_AD) &&
1076 (diff = a->ext.xport.port - b->ext.xport.port) != 0)
1077 return (diff);
1078 break;
1079
1080 case IPPROTO_GRE:
1081 if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
1082 a->proto_variant == b->proto_variant) {
1083 if (!!(diff = a->gwy.xport.call_id -
1084 b->gwy.xport.call_id))
1085 return (diff);
1086 }
1087 break;
1088
1089 case IPPROTO_ESP:
1090 if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi))
1091 return (diff);
1092 break;
1093
1094 default:
1095 break;
1096 }
1097 #endif
1098
1099 switch (a->af) {
1100 #if INET
1101 case AF_INET:
1102 #ifndef NO_APPLE_EXTENSIONS
1103 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1104 return (1);
1105 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1106 return (-1);
1107 if (extfilter < PF_EXTFILTER_EI) {
1108 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1109 return (1);
1110 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1111 return (-1);
1112 }
1113 #else
1114 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1115 return (1);
1116 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1117 return (-1);
1118 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1119 return (1);
1120 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1121 return (-1);
1122 #endif
1123 break;
1124 #endif /* INET */
1125 #if INET6
1126 case AF_INET6:
1127 #ifndef NO_APPLE_EXTENSIONS
1128 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
1129 return (1);
1130 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
1131 return (-1);
1132 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
1133 return (1);
1134 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
1135 return (-1);
1136 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
1137 return (1);
1138 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
1139 return (-1);
1140 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1141 return (1);
1142 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1143 return (-1);
1144 if (extfilter < PF_EXTFILTER_EI ||
1145 !PF_AZERO(&b->ext.addr, AF_INET6)) {
1146 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
1147 return (1);
1148 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
1149 return (-1);
1150 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
1151 return (1);
1152 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
1153 return (-1);
1154 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
1155 return (1);
1156 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
1157 return (-1);
1158 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1159 return (1);
1160 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1161 return (-1);
1162 }
1163 #else
1164 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
1165 return (1);
1166 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
1167 return (-1);
1168 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
1169 return (1);
1170 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
1171 return (-1);
1172 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
1173 return (1);
1174 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
1175 return (-1);
1176 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
1177 return (1);
1178 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
1179 return (-1);
1180 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
1181 return (1);
1182 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
1183 return (-1);
1184 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
1185 return (1);
1186 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
1187 return (-1);
1188 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1189 return (1);
1190 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1191 return (-1);
1192 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1193 return (1);
1194 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1195 return (-1);
1196 #endif
1197 break;
1198 #endif /* INET6 */
1199 }
1200
1201 #ifndef NO_APPLE_EXTENSIONS
1202 if (a->app_state && b->app_state) {
1203 if (a->app_state->compare_ext_gwy &&
1204 b->app_state->compare_ext_gwy) {
1205 diff = (const char *)b->app_state->compare_ext_gwy -
1206 (const char *)a->app_state->compare_ext_gwy;
1207 if (diff != 0)
1208 return (diff);
1209 diff = a->app_state->compare_ext_gwy(a->app_state,
1210 b->app_state);
1211 if (diff != 0)
1212 return (diff);
1213 }
1214 }
1215 #else
1216 if ((diff = a->ext.port - b->ext.port) != 0)
1217 return (diff);
1218 if ((diff = a->gwy.port - b->gwy.port) != 0)
1219 return (diff);
1220 #endif
1221
1222 return (0);
1223 }
1224
1225 static __inline int
1226 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
1227 {
1228 if (a->id > b->id)
1229 return (1);
1230 if (a->id < b->id)
1231 return (-1);
1232 if (a->creatorid > b->creatorid)
1233 return (1);
1234 if (a->creatorid < b->creatorid)
1235 return (-1);
1236
1237 return (0);
1238 }
1239
1240 #if INET6
1241 void
1242 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
1243 {
1244 switch (af) {
1245 #if INET
1246 case AF_INET:
1247 dst->addr32[0] = src->addr32[0];
1248 break;
1249 #endif /* INET */
1250 case AF_INET6:
1251 dst->addr32[0] = src->addr32[0];
1252 dst->addr32[1] = src->addr32[1];
1253 dst->addr32[2] = src->addr32[2];
1254 dst->addr32[3] = src->addr32[3];
1255 break;
1256 }
1257 }
1258 #endif /* INET6 */
1259
1260 struct pf_state *
1261 pf_find_state_byid(struct pf_state_cmp *key)
1262 {
1263 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1264
1265 return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1266 }
1267
1268 static struct pf_state *
1269 pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
1270 {
1271 struct pf_state_key *sk = NULL;
1272 struct pf_state *s;
1273
1274 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1275
1276 switch (dir) {
1277 case PF_OUT:
1278 sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
1279 (struct pf_state_key *)key);
1280 break;
1281 case PF_IN:
1282 sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
1283 (struct pf_state_key *)key);
1284 break;
1285 default:
1286 panic("pf_find_state");
1287 }
1288
1289 /* list is sorted, if-bound states before floating ones */
1290 if (sk != NULL)
1291 TAILQ_FOREACH(s, &sk->states, next)
1292 if (s->kif == pfi_all || s->kif == kif)
1293 return (s);
1294
1295 return (NULL);
1296 }
1297
1298 struct pf_state *
1299 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1300 {
1301 struct pf_state_key *sk = NULL;
1302 struct pf_state *s, *ret = NULL;
1303
1304 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1305
1306 switch (dir) {
1307 case PF_OUT:
1308 sk = RB_FIND(pf_state_tree_lan_ext,
1309 &pf_statetbl_lan_ext, (struct pf_state_key *)key);
1310 break;
1311 case PF_IN:
1312 sk = RB_FIND(pf_state_tree_ext_gwy,
1313 &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
1314 break;
1315 default:
1316 panic("pf_find_state_all");
1317 }
1318
1319 if (sk != NULL) {
1320 ret = TAILQ_FIRST(&sk->states);
1321 if (more == NULL)
1322 return (ret);
1323
1324 TAILQ_FOREACH(s, &sk->states, next)
1325 (*more)++;
1326 }
1327
1328 return (ret);
1329 }
1330
1331 static void
1332 pf_init_threshold(struct pf_threshold *threshold,
1333 u_int32_t limit, u_int32_t seconds)
1334 {
1335 threshold->limit = limit * PF_THRESHOLD_MULT;
1336 threshold->seconds = seconds;
1337 threshold->count = 0;
1338 threshold->last = pf_time_second();
1339 }
1340
1341 static void
1342 pf_add_threshold(struct pf_threshold *threshold)
1343 {
1344 u_int32_t t = pf_time_second(), diff = t - threshold->last;
1345
1346 if (diff >= threshold->seconds)
1347 threshold->count = 0;
1348 else
1349 threshold->count -= threshold->count * diff /
1350 threshold->seconds;
1351 threshold->count += PF_THRESHOLD_MULT;
1352 threshold->last = t;
1353 }
1354
1355 static int
1356 pf_check_threshold(struct pf_threshold *threshold)
1357 {
1358 return (threshold->count > threshold->limit);
1359 }
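/*
 * The three routines above implement a decaying rate limiter in fixed
 * point: pf_init_threshold() pre-scales the limit by PF_THRESHOLD_MULT,
 * pf_add_threshold() first ages the counter by the fraction of the
 * window that has elapsed and then adds one scaled event, and
 * pf_check_threshold() trips once the aged count exceeds the scaled
 * limit.  A rough worked example, assuming a limit of 15 connections
 * per 5 seconds: with the counter at the equivalent of 10 connections,
 * one second of idle time decays it to 8, and the new connection brings
 * it to 9, still below the limit.
 */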
1360
1361 static int
1362 pf_src_connlimit(struct pf_state **state)
1363 {
1364 int bad = 0;
1365
1366 (*state)->src_node->conn++;
1367 VERIFY((*state)->src_node->conn != 0);
1368 (*state)->src.tcp_est = 1;
1369 pf_add_threshold(&(*state)->src_node->conn_rate);
1370
1371 if ((*state)->rule.ptr->max_src_conn &&
1372 (*state)->rule.ptr->max_src_conn <
1373 (*state)->src_node->conn) {
1374 pf_status.lcounters[LCNT_SRCCONN]++;
1375 bad++;
1376 }
1377
1378 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
1379 pf_check_threshold(&(*state)->src_node->conn_rate)) {
1380 pf_status.lcounters[LCNT_SRCCONNRATE]++;
1381 bad++;
1382 }
1383
1384 if (!bad)
1385 return (0);
1386
1387 if ((*state)->rule.ptr->overload_tbl) {
1388 struct pfr_addr p;
1389 u_int32_t killed = 0;
1390
1391 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
1392 if (pf_status.debug >= PF_DEBUG_MISC) {
1393 printf("pf_src_connlimit: blocking address ");
1394 pf_print_host(&(*state)->src_node->addr, 0,
1395 (*state)->state_key->af);
1396 }
1397
1398 bzero(&p, sizeof (p));
1399 p.pfra_af = (*state)->state_key->af;
1400 switch ((*state)->state_key->af) {
1401 #if INET
1402 case AF_INET:
1403 p.pfra_net = 32;
1404 p.pfra_ip4addr = (*state)->src_node->addr.v4;
1405 break;
1406 #endif /* INET */
1407 #if INET6
1408 case AF_INET6:
1409 p.pfra_net = 128;
1410 p.pfra_ip6addr = (*state)->src_node->addr.v6;
1411 break;
1412 #endif /* INET6 */
1413 }
1414
1415 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
1416 &p, pf_calendar_time_second());
1417
1418 /* kill existing states if that's required. */
1419 if ((*state)->rule.ptr->flush) {
1420 struct pf_state_key *sk;
1421 struct pf_state *st;
1422
1423 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
1424 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
1425 sk = st->state_key;
1426 /*
1427 * Kill states from this source. (Only those
1428 * from the same rule if PF_FLUSH_GLOBAL is not
1429 * set)
1430 */
1431 if (sk->af ==
1432 (*state)->state_key->af &&
1433 (((*state)->state_key->direction ==
1434 PF_OUT &&
1435 PF_AEQ(&(*state)->src_node->addr,
1436 &sk->lan.addr, sk->af)) ||
1437 ((*state)->state_key->direction == PF_IN &&
1438 PF_AEQ(&(*state)->src_node->addr,
1439 &sk->ext.addr, sk->af))) &&
1440 ((*state)->rule.ptr->flush &
1441 PF_FLUSH_GLOBAL ||
1442 (*state)->rule.ptr == st->rule.ptr)) {
1443 st->timeout = PFTM_PURGE;
1444 st->src.state = st->dst.state =
1445 TCPS_CLOSED;
1446 killed++;
1447 }
1448 }
1449 if (pf_status.debug >= PF_DEBUG_MISC)
1450 printf(", %u states killed", killed);
1451 }
1452 if (pf_status.debug >= PF_DEBUG_MISC)
1453 printf("\n");
1454 }
1455
1456 /* kill this state */
1457 (*state)->timeout = PFTM_PURGE;
1458 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
1459 return (1);
1460 }
1461
1462 int
1463 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
1464 struct pf_addr *src, sa_family_t af)
1465 {
1466 struct pf_src_node k;
1467
1468 if (*sn == NULL) {
1469 k.af = af;
1470 PF_ACPY(&k.addr, src, af);
1471 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
1472 rule->rpool.opts & PF_POOL_STICKYADDR)
1473 k.rule.ptr = rule;
1474 else
1475 k.rule.ptr = NULL;
1476 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
1477 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
1478 }
1479 if (*sn == NULL) {
1480 if (!rule->max_src_nodes ||
1481 rule->src_nodes < rule->max_src_nodes)
1482 (*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK);
1483 else
1484 pf_status.lcounters[LCNT_SRCNODES]++;
1485 if ((*sn) == NULL)
1486 return (-1);
1487 bzero(*sn, sizeof (struct pf_src_node));
1488
1489 pf_init_threshold(&(*sn)->conn_rate,
1490 rule->max_src_conn_rate.limit,
1491 rule->max_src_conn_rate.seconds);
1492
1493 (*sn)->af = af;
1494 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
1495 rule->rpool.opts & PF_POOL_STICKYADDR)
1496 (*sn)->rule.ptr = rule;
1497 else
1498 (*sn)->rule.ptr = NULL;
1499 PF_ACPY(&(*sn)->addr, src, af);
1500 if (RB_INSERT(pf_src_tree,
1501 &tree_src_tracking, *sn) != NULL) {
1502 if (pf_status.debug >= PF_DEBUG_MISC) {
1503 printf("pf: src_tree insert failed: ");
1504 pf_print_host(&(*sn)->addr, 0, af);
1505 printf("\n");
1506 }
1507 pool_put(&pf_src_tree_pl, *sn);
1508 return (-1);
1509 }
1510 (*sn)->creation = pf_time_second();
1511 (*sn)->ruletype = rule->action;
1512 if ((*sn)->rule.ptr != NULL)
1513 (*sn)->rule.ptr->src_nodes++;
1514 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
1515 pf_status.src_nodes++;
1516 } else {
1517 if (rule->max_src_states &&
1518 (*sn)->states >= rule->max_src_states) {
1519 pf_status.lcounters[LCNT_SRCSTATES]++;
1520 return (-1);
1521 }
1522 }
1523 return (0);
1524 }
1525
1526 static void
1527 pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif)
1528 {
1529 struct pf_state_key *sk = s->state_key;
1530
1531 if (pf_status.debug >= PF_DEBUG_MISC) {
1532 #ifndef NO_APPLE_EXTENSIONS
1533 printf("pf: state insert failed: %s %s ", tree, kif->pfik_name);
1534 switch (sk->proto) {
1535 case IPPROTO_TCP:
1536 printf("TCP");
1537 break;
1538 case IPPROTO_UDP:
1539 printf("UDP");
1540 break;
1541 case IPPROTO_ICMP:
1542 printf("ICMP4");
1543 break;
1544 case IPPROTO_ICMPV6:
1545 printf("ICMP6");
1546 break;
1547 default:
1548 printf("PROTO=%u", sk->proto);
1549 break;
1550 }
1551 printf(" lan: ");
1552 pf_print_sk_host(&sk->lan, sk->af, sk->proto,
1553 sk->proto_variant);
1554 printf(" gwy: ");
1555 pf_print_sk_host(&sk->gwy, sk->af, sk->proto,
1556 sk->proto_variant);
1557 printf(" ext: ");
1558 pf_print_sk_host(&sk->ext, sk->af, sk->proto,
1559 sk->proto_variant);
1560 #else
1561 printf("pf: state insert failed: %s %s", tree, kif->pfik_name);
1562 printf(" lan: ");
1563 pf_print_host(&sk->lan.addr, sk->lan.port,
1564 sk->af);
1565 printf(" gwy: ");
1566 pf_print_host(&sk->gwy.addr, sk->gwy.port,
1567 sk->af);
1568 printf(" ext: ");
1569 pf_print_host(&sk->ext.addr, sk->ext.port,
1570 sk->af);
1571 #endif
1572 if (s->sync_flags & PFSTATE_FROMSYNC)
1573 printf(" (from sync)");
1574 printf("\n");
1575 }
1576 }
1577
1578 int
1579 pf_insert_state(struct pfi_kif *kif, struct pf_state *s)
1580 {
1581 struct pf_state_key *cur;
1582 struct pf_state *sp;
1583
1584 VERIFY(s->state_key != NULL);
1585 s->kif = kif;
1586
1587 if ((cur = RB_INSERT(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
1588 s->state_key)) != NULL) {
1589 /* key exists. check for same kif, if none, add to key */
1590 TAILQ_FOREACH(sp, &cur->states, next)
1591 if (sp->kif == kif) { /* collision! */
1592 pf_stateins_err("tree_lan_ext", s, kif);
1593 pf_detach_state(s,
1594 PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
1595 return (-1);
1596 }
1597 pf_detach_state(s, PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
1598 pf_attach_state(cur, s, kif == pfi_all ? 1 : 0);
1599 }
1600
1601 /* if cur != NULL, we already found a state key and attached to it */
1602 if (cur == NULL && (cur = RB_INSERT(pf_state_tree_ext_gwy,
1603 &pf_statetbl_ext_gwy, s->state_key)) != NULL) {
1604 /* must not happen. we must have found the sk above! */
1605 pf_stateins_err("tree_ext_gwy", s, kif);
1606 pf_detach_state(s, PF_DT_SKIP_EXTGWY);
1607 return (-1);
1608 }
1609
1610 if (s->id == 0 && s->creatorid == 0) {
1611 s->id = htobe64(pf_status.stateid++);
1612 s->creatorid = pf_status.hostid;
1613 }
1614 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
1615 if (pf_status.debug >= PF_DEBUG_MISC) {
1616 printf("pf: state insert failed: "
1617 "id: %016llx creatorid: %08x",
1618 be64toh(s->id), ntohl(s->creatorid));
1619 if (s->sync_flags & PFSTATE_FROMSYNC)
1620 printf(" (from sync)");
1621 printf("\n");
1622 }
1623 pf_detach_state(s, 0);
1624 return (-1);
1625 }
1626 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
1627 pf_status.fcounters[FCNT_STATE_INSERT]++;
1628 pf_status.states++;
1629 VERIFY(pf_status.states != 0);
1630 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1631 #if NPFSYNC
1632 pfsync_insert_state(s);
1633 #endif
1634 return (0);
1635 }
1636
1637 void
1638 pf_purge_thread_fn(void *v, wait_result_t w)
1639 {
1640 #pragma unused(v, w)
1641 u_int32_t nloops = 0;
1642 int t = 0;
1643
1644 for (;;) {
1645 (void) tsleep(pf_purge_thread_fn, PWAIT, "pftm", t * hz);
1646
1647 lck_rw_lock_shared(pf_perim_lock);
1648 lck_mtx_lock(pf_lock);
1649
1650 /* purge everything if not running */
1651 if (!pf_status.running) {
1652 pf_purge_expired_states(pf_status.states);
1653 pf_purge_expired_fragments();
1654 pf_purge_expired_src_nodes();
1655
1656 /* terminate thread (we don't currently do this) */
1657 if (pf_purge_thread == NULL) {
1658 lck_mtx_unlock(pf_lock);
1659 lck_rw_done(pf_perim_lock);
1660
1661 thread_deallocate(current_thread());
1662 thread_terminate(current_thread());
1663 /* NOTREACHED */
1664 return;
1665 } else {
1666 /* if there's nothing left, sleep w/o timeout */
1667 if (pf_status.states == 0 &&
1668 pf_normalize_isempty() &&
1669 RB_EMPTY(&tree_src_tracking))
1670 t = 0;
1671
1672 lck_mtx_unlock(pf_lock);
1673 lck_rw_done(pf_perim_lock);
1674 continue;
1675 }
1676 } else if (t == 0) {
1677 /* Set timeout to 1 second */
1678 t = 1;
1679 }
1680
1681 /* process a fraction of the state table every second */
1682 pf_purge_expired_states(1 + (pf_status.states
1683 / pf_default_rule.timeout[PFTM_INTERVAL]));
1684
1685 /* purge other expired types every PFTM_INTERVAL seconds */
1686 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
1687 pf_purge_expired_fragments();
1688 pf_purge_expired_src_nodes();
1689 nloops = 0;
1690 }
1691
1692 lck_mtx_unlock(pf_lock);
1693 lck_rw_done(pf_perim_lock);
1694 }
1695 }
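/*
 * A feel for the sweep rate of the loop above: each one-second pass
 * checks 1 + states / timeout[PFTM_INTERVAL] entries, so the whole
 * state table is visited roughly once per interval.  With the usual
 * 10 second interval and, say, 50,000 states, that is about 5,001
 * expiry checks per wakeup rather than 50,000 (numbers illustrative).
 */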
1696
1697 u_int64_t
1698 pf_state_expires(const struct pf_state *state)
1699 {
1700 u_int32_t t;
1701 u_int32_t start;
1702 u_int32_t end;
1703 u_int32_t states;
1704
1705 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1706
1707 /* handle all PFTM_* > PFTM_MAX here */
1708 if (state->timeout == PFTM_PURGE)
1709 return (pf_time_second());
1710 if (state->timeout == PFTM_UNTIL_PACKET)
1711 return (0);
1712 VERIFY(state->timeout != PFTM_UNLINKED);
1713 VERIFY(state->timeout < PFTM_MAX);
1714 t = state->rule.ptr->timeout[state->timeout];
1715 if (!t)
1716 t = pf_default_rule.timeout[state->timeout];
1717 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1718 if (start) {
1719 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1720 states = state->rule.ptr->states;
1721 } else {
1722 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1723 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1724 states = pf_status.states;
1725 }
1726 if (end && states > start && start < end) {
1727 if (states < end)
1728 return (state->expire + t * (end - states) /
1729 (end - start));
1730 else
1731 return (pf_time_second());
1732 }
1733 return (state->expire + t);
1734 }
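/*
 * Worked example of the adaptive scaling above, with illustrative
 * numbers: given adaptive start = 6000, adaptive end = 12000 and 9,000
 * states in the table, a 60 second timeout is scaled to
 * 60 * (12000 - 9000) / (12000 - 6000) = 30 seconds; at 12,000 or more
 * states the state is treated as already expired.
 */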
1735
1736 void
1737 pf_purge_expired_src_nodes(void)
1738 {
1739 struct pf_src_node *cur, *next;
1740
1741 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1742
1743 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1744 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1745
1746 if (cur->states <= 0 && cur->expire <= pf_time_second()) {
1747 if (cur->rule.ptr != NULL) {
1748 cur->rule.ptr->src_nodes--;
1749 if (cur->rule.ptr->states <= 0 &&
1750 cur->rule.ptr->max_src_nodes <= 0)
1751 pf_rm_rule(NULL, cur->rule.ptr);
1752 }
1753 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1754 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1755 pf_status.src_nodes--;
1756 pool_put(&pf_src_tree_pl, cur);
1757 }
1758 }
1759 }
1760
1761 void
1762 pf_src_tree_remove_state(struct pf_state *s)
1763 {
1764 u_int32_t t;
1765
1766 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1767
1768 if (s->src_node != NULL) {
1769 if (s->src.tcp_est) {
1770 VERIFY(s->src_node->conn > 0);
1771 --s->src_node->conn;
1772 }
1773 VERIFY(s->src_node->states > 0);
1774 if (--s->src_node->states <= 0) {
1775 t = s->rule.ptr->timeout[PFTM_SRC_NODE];
1776 if (!t)
1777 t = pf_default_rule.timeout[PFTM_SRC_NODE];
1778 s->src_node->expire = pf_time_second() + t;
1779 }
1780 }
1781 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1782 VERIFY(s->nat_src_node->states > 0);
1783 if (--s->nat_src_node->states <= 0) {
1784 t = s->rule.ptr->timeout[PFTM_SRC_NODE];
1785 if (!t)
1786 t = pf_default_rule.timeout[PFTM_SRC_NODE];
1787 s->nat_src_node->expire = pf_time_second() + t;
1788 }
1789 }
1790 s->src_node = s->nat_src_node = NULL;
1791 }
1792
1793 void
1794 pf_unlink_state(struct pf_state *cur)
1795 {
1796 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1797
1798 #ifndef NO_APPLE_EXTENSIONS
1799 if (cur->src.state == PF_TCPS_PROXY_DST) {
1800 pf_send_tcp(cur->rule.ptr, cur->state_key->af,
1801 &cur->state_key->ext.addr, &cur->state_key->lan.addr,
1802 cur->state_key->ext.xport.port,
1803 cur->state_key->lan.xport.port,
1804 cur->src.seqhi, cur->src.seqlo + 1,
1805 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1806 }
1807
1808 hook_runloop(&cur->unlink_hooks, HOOK_REMOVE|HOOK_FREE);
1809 #else
1810 if (cur->src.state == PF_TCPS_PROXY_DST) {
1811 pf_send_tcp(cur->rule.ptr, cur->state_key->af,
1812 &cur->state_key->ext.addr, &cur->state_key->lan.addr,
1813 cur->state_key->ext.port, cur->state_key->lan.port,
1814 cur->src.seqhi, cur->src.seqlo + 1,
1815 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1816 }
1817 #endif
1818 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1819 #if NPFSYNC
1820 if (cur->creatorid == pf_status.hostid)
1821 pfsync_delete_state(cur);
1822 #endif
1823 cur->timeout = PFTM_UNLINKED;
1824 pf_src_tree_remove_state(cur);
1825 pf_detach_state(cur, 0);
1826 }
1827
1828 /* callers should be at splpf and hold the
1829 * write_lock on pf_consistency_lock */
1830 void
1831 pf_free_state(struct pf_state *cur)
1832 {
1833 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1834 #if NPFSYNC
1835 if (pfsyncif != NULL &&
1836 (pfsyncif->sc_bulk_send_next == cur ||
1837 pfsyncif->sc_bulk_terminator == cur))
1838 return;
1839 #endif
1840 VERIFY(cur->timeout == PFTM_UNLINKED);
1841 VERIFY(cur->rule.ptr->states > 0);
1842 if (--cur->rule.ptr->states <= 0 &&
1843 cur->rule.ptr->src_nodes <= 0)
1844 pf_rm_rule(NULL, cur->rule.ptr);
1845 if (cur->nat_rule.ptr != NULL) {
1846 VERIFY(cur->nat_rule.ptr->states > 0);
1847 if (--cur->nat_rule.ptr->states <= 0 &&
1848 cur->nat_rule.ptr->src_nodes <= 0)
1849 pf_rm_rule(NULL, cur->nat_rule.ptr);
1850 }
1851 if (cur->anchor.ptr != NULL) {
1852 VERIFY(cur->anchor.ptr->states > 0);
1853 if (--cur->anchor.ptr->states <= 0)
1854 pf_rm_rule(NULL, cur->anchor.ptr);
1855 }
1856 pf_normalize_tcp_cleanup(cur);
1857 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1858 TAILQ_REMOVE(&state_list, cur, entry_list);
1859 if (cur->tag)
1860 pf_tag_unref(cur->tag);
1861 pool_put(&pf_state_pl, cur);
1862 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1863 VERIFY(pf_status.states > 0);
1864 pf_status.states--;
1865 }
1866
1867 void
1868 pf_purge_expired_states(u_int32_t maxcheck)
1869 {
1870 static struct pf_state *cur = NULL;
1871 struct pf_state *next;
1872
1873 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1874
1875 while (maxcheck--) {
1876 /* wrap to start of list when we hit the end */
1877 if (cur == NULL) {
1878 cur = TAILQ_FIRST(&state_list);
1879 if (cur == NULL)
1880 break; /* list empty */
1881 }
1882
1883 /* get next state, as cur may get deleted */
1884 next = TAILQ_NEXT(cur, entry_list);
1885
1886 if (cur->timeout == PFTM_UNLINKED) {
1887 pf_free_state(cur);
1888 } else if (pf_state_expires(cur) <= pf_time_second()) {
1889 /* unlink and free expired state */
1890 pf_unlink_state(cur);
1891 pf_free_state(cur);
1892 }
1893 cur = next;
1894 }
1895 }
1896
1897 int
1898 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1899 {
1900 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1901
1902 if (aw->type != PF_ADDR_TABLE)
1903 return (0);
1904 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
1905 return (1);
1906 return (0);
1907 }
1908
1909 void
1910 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1911 {
1912 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1913
1914 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1915 return;
1916 pfr_detach_table(aw->p.tbl);
1917 aw->p.tbl = NULL;
1918 }
1919
1920 void
1921 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1922 {
1923 struct pfr_ktable *kt = aw->p.tbl;
1924
1925 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1926
1927 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1928 return;
1929 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1930 kt = kt->pfrkt_root;
1931 aw->p.tbl = NULL;
1932 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1933 kt->pfrkt_cnt : -1;
1934 }
1935
1936 #ifndef NO_APPLE_EXTENSIONS
1937 static void
1938 pf_print_addr(struct pf_addr *addr, sa_family_t af)
1939 {
1940 switch (af) {
1941 #if INET
1942 case AF_INET: {
1943 u_int32_t a = ntohl(addr->addr32[0]);
1944 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1945 (a>>8)&255, a&255);
1946 break;
1947 }
1948 #endif /* INET */
1949 #if INET6
1950 case AF_INET6: {
1951 u_int16_t b;
1952 u_int8_t i, curstart = 255, curend = 0,
1953 maxstart = 0, maxend = 0;
1954 for (i = 0; i < 8; i++) {
1955 if (!addr->addr16[i]) {
1956 if (curstart == 255)
1957 curstart = i;
1958 else
1959 curend = i;
1960 } else {
1961 if (curstart) {
1962 if ((curend - curstart) >
1963 (maxend - maxstart)) {
1964 maxstart = curstart;
1965 maxend = curend;
1966 curstart = 255;
1967 }
1968 }
1969 }
1970 }
1971 for (i = 0; i < 8; i++) {
1972 if (i >= maxstart && i <= maxend) {
1973 if (maxend != 7) {
1974 if (i == maxstart)
1975 printf(":");
1976 } else {
1977 if (i == maxend)
1978 printf(":");
1979 }
1980 } else {
1981 b = ntohs(addr->addr16[i]);
1982 printf("%x", b);
1983 if (i < 7)
1984 printf(":");
1985 }
1986 }
1987 break;
1988 }
1989 #endif /* INET6 */
1990 }
1991 }
1992
1993 static void
1994 pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto,
1995 u_int8_t proto_variant)
1996 {
1997 pf_print_addr(&sh->addr, af);
1998
1999 switch (proto) {
2000 case IPPROTO_ESP:
2001 if (sh->xport.spi)
2002 printf("[%08x]", ntohl(sh->xport.spi));
2003 break;
2004
2005 case IPPROTO_GRE:
2006 if (proto_variant == PF_GRE_PPTP_VARIANT)
2007 printf("[%u]", ntohs(sh->xport.call_id));
2008 break;
2009
2010 case IPPROTO_TCP:
2011 case IPPROTO_UDP:
2012 printf("[%u]", ntohs(sh->xport.port));
2013 break;
2014
2015 default:
2016 break;
2017 }
2018 }
2019 #endif
2020
2021 static void
2022 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2023 {
2024 #ifndef NO_APPLE_EXTENSIONS
2025 pf_print_addr(addr, af);
2026 if (p)
2027 printf("[%u]", ntohs(p));
2028 #else
2029 switch (af) {
2030 #if INET
2031 case AF_INET: {
2032 u_int32_t a = ntohl(addr->addr32[0]);
2033 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2034 (a>>8)&255, a&255);
2035 if (p) {
2036 p = ntohs(p);
2037 printf(":%u", p);
2038 }
2039 break;
2040 }
2041 #endif /* INET */
2042 #if INET6
2043 case AF_INET6: {
2044 u_int16_t b;
2045 u_int8_t i, curstart = 255, curend = 0,
2046 maxstart = 0, maxend = 0;
2047 for (i = 0; i < 8; i++) {
2048 if (!addr->addr16[i]) {
2049 if (curstart == 255)
2050 curstart = i;
2051 else
2052 curend = i;
2053 } else {
2054 if (curstart) {
2055 if ((curend - curstart) >
2056 (maxend - maxstart)) {
2057 maxstart = curstart;
2058 maxend = curend;
2059 curstart = 255;
2060 }
2061 }
2062 }
2063 }
2064 for (i = 0; i < 8; i++) {
2065 if (i >= maxstart && i <= maxend) {
2066 if (maxend != 7) {
2067 if (i == maxstart)
2068 printf(":");
2069 } else {
2070 if (i == maxend)
2071 printf(":");
2072 }
2073 } else {
2074 b = ntohs(addr->addr16[i]);
2075 printf("%x", b);
2076 if (i < 7)
2077 printf(":");
2078 }
2079 }
2080 if (p) {
2081 p = ntohs(p);
2082 printf("[%u]", p);
2083 }
2084 break;
2085 }
2086 #endif /* INET6 */
2087 }
2088 #endif
2089 }
2090
2091 void
2092 pf_print_state(struct pf_state *s)
2093 {
2094 struct pf_state_key *sk = s->state_key;
2095 switch (sk->proto) {
2096 #ifndef NO_APPLE_EXTENSIONS
2097 case IPPROTO_ESP:
2098 printf("ESP ");
2099 break;
2100 case IPPROTO_GRE:
2101 printf("GRE%u ", sk->proto_variant);
2102 break;
2103 #endif
2104 case IPPROTO_TCP:
2105 printf("TCP ");
2106 break;
2107 case IPPROTO_UDP:
2108 printf("UDP ");
2109 break;
2110 case IPPROTO_ICMP:
2111 printf("ICMP ");
2112 break;
2113 case IPPROTO_ICMPV6:
2114 printf("ICMPV6 ");
2115 break;
2116 default:
2117 printf("%u ", sk->proto);
2118 break;
2119 }
2120 #ifndef NO_APPLE_EXTENSIONS
2121 pf_print_sk_host(&sk->lan, sk->af, sk->proto, sk->proto_variant);
2122 printf(" ");
2123 pf_print_sk_host(&sk->gwy, sk->af, sk->proto, sk->proto_variant);
2124 printf(" ");
2125 pf_print_sk_host(&sk->ext, sk->af, sk->proto, sk->proto_variant);
2126 #else
2127 pf_print_host(&sk->lan.addr, sk->lan.port, sk->af);
2128 printf(" ");
2129 pf_print_host(&sk->gwy.addr, sk->gwy.port, sk->af);
2130 printf(" ");
2131 pf_print_host(&sk->ext.addr, sk->ext.port, sk->af);
2132 #endif
2133 printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
2134 s->src.seqhi, s->src.max_win, s->src.seqdiff);
2135 if (s->src.wscale && s->dst.wscale)
2136 printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
2137 printf("]");
2138 printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
2139 s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
2140 if (s->src.wscale && s->dst.wscale)
2141 printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
2142 printf("]");
2143 printf(" %u:%u", s->src.state, s->dst.state);
2144 }
2145
2146 void
2147 pf_print_flags(u_int8_t f)
2148 {
2149 if (f)
2150 printf(" ");
2151 if (f & TH_FIN)
2152 printf("F");
2153 if (f & TH_SYN)
2154 printf("S");
2155 if (f & TH_RST)
2156 printf("R");
2157 if (f & TH_PUSH)
2158 printf("P");
2159 if (f & TH_ACK)
2160 printf("A");
2161 if (f & TH_URG)
2162 printf("U");
2163 if (f & TH_ECE)
2164 printf("E");
2165 if (f & TH_CWR)
2166 printf("W");
2167 }
2168
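/*
 * Skip steps: for each rule and each match criterion (interface, direction,
 * address family, protocol, source/destination address and port), skip[i]
 * points at the first later rule whose value for that criterion differs.
 * PF_SET_SKIP_STEPS() advances the per-criterion head pointers up to the
 * current rule; pf_calc_skip_steps() applies it over the whole queue so
 * rule evaluation can jump over runs of rules that would fail the same test.
 */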
2169 #define PF_SET_SKIP_STEPS(i) \
2170 do { \
2171 while (head[i] != cur) { \
2172 head[i]->skip[i].ptr = cur; \
2173 head[i] = TAILQ_NEXT(head[i], entries); \
2174 } \
2175 } while (0)
2176
2177 void
2178 pf_calc_skip_steps(struct pf_rulequeue *rules)
2179 {
2180 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
2181 int i;
2182
2183 cur = TAILQ_FIRST(rules);
2184 prev = cur;
2185 for (i = 0; i < PF_SKIP_COUNT; ++i)
2186 head[i] = cur;
2187 while (cur != NULL) {
2188
2189 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
2190 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
2191 if (cur->direction != prev->direction)
2192 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
2193 if (cur->af != prev->af)
2194 PF_SET_SKIP_STEPS(PF_SKIP_AF);
2195 if (cur->proto != prev->proto)
2196 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
2197 if (cur->src.neg != prev->src.neg ||
2198 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
2199 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
2200 #ifndef NO_APPLE_EXTENSIONS
2201 {
2202 union pf_rule_xport *cx = &cur->src.xport;
2203 union pf_rule_xport *px = &prev->src.xport;
2204
2205 switch (cur->proto) {
2206 case IPPROTO_GRE:
2207 case IPPROTO_ESP:
2208 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2209 break;
2210 default:
2211 if (prev->proto == IPPROTO_GRE ||
2212 prev->proto == IPPROTO_ESP ||
2213 cx->range.op != px->range.op ||
2214 cx->range.port[0] != px->range.port[0] ||
2215 cx->range.port[1] != px->range.port[1])
2216 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2217 break;
2218 }
2219 }
2220 #else
2221 if (cur->src.port[0] != prev->src.port[0] ||
2222 cur->src.port[1] != prev->src.port[1] ||
2223 cur->src.port_op != prev->src.port_op)
2224 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2225 #endif
2226 if (cur->dst.neg != prev->dst.neg ||
2227 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
2228 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
2229 #ifndef NO_APPLE_EXTENSIONS
2230 {
2231 union pf_rule_xport *cx = &cur->dst.xport;
2232 union pf_rule_xport *px = &prev->dst.xport;
2233
2234 switch (cur->proto) {
2235 case IPPROTO_GRE:
2236 if (cur->proto != prev->proto ||
2237 cx->call_id != px->call_id)
2238 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2239 break;
2240 case IPPROTO_ESP:
2241 if (cur->proto != prev->proto ||
2242 cx->spi != px->spi)
2243 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2244 break;
2245 default:
2246 if (prev->proto == IPPROTO_GRE ||
2247 prev->proto == IPPROTO_ESP ||
2248 cx->range.op != px->range.op ||
2249 cx->range.port[0] != px->range.port[0] ||
2250 cx->range.port[1] != px->range.port[1])
2251 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2252 break;
2253 }
2254 }
2255 #else
2256 if (cur->dst.port[0] != prev->dst.port[0] ||
2257 cur->dst.port[1] != prev->dst.port[1] ||
2258 cur->dst.port_op != prev->dst.port_op)
2259 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2260 #endif
2261
2262 prev = cur;
2263 cur = TAILQ_NEXT(cur, entries);
2264 }
2265 for (i = 0; i < PF_SKIP_COUNT; ++i)
2266 PF_SET_SKIP_STEPS(i);
2267 }
2268
2269 static int
2270 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2271 {
2272 if (aw1->type != aw2->type)
2273 return (1);
2274 switch (aw1->type) {
2275 case PF_ADDR_ADDRMASK:
2276 case PF_ADDR_RANGE:
2277 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
2278 return (1);
2279 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
2280 return (1);
2281 return (0);
2282 case PF_ADDR_DYNIFTL:
2283 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2284 case PF_ADDR_NOROUTE:
2285 case PF_ADDR_URPFFAILED:
2286 return (0);
2287 case PF_ADDR_TABLE:
2288 return (aw1->p.tbl != aw2->p.tbl);
2289 case PF_ADDR_RTLABEL:
2290 return (aw1->v.rtlabel != aw2->v.rtlabel);
2291 default:
2292 printf("invalid address type: %d\n", aw1->type);
2293 return (1);
2294 }
2295 }
2296
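/*
 * Incrementally update a ones-complement Internet checksum when a single
 * 16-bit word changes from 'old' to 'new': add the old value, subtract the
 * new one, and fold the carries back into 16 bits.  The 'udp' flag preserves
 * UDP's special encoding, where a checksum of zero means "no checksum": an
 * incoming zero is left untouched and a computed zero is returned as 0xffff.
 *
 * Illustrative use (hypothetical values), e.g. when rewriting a TCP port:
 *
 *	th->th_sum = pf_cksum_fixup(th->th_sum, old_port, new_port, 0);
 */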
2297 u_int16_t
2298 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2299 {
2300 u_int32_t l;
2301
2302 if (udp && !cksum)
2303 return (0);
2304 l = cksum + old - new;
2305 l = (l >> 16) + (l & 0xffff);
2306 l = l & 0xffff;
2307 if (udp && !l)
2308 return (0xffff);
2309 return (l);
2310 }
2311
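/*
 * Rewrite an address/port pair in place for NAT and patch the affected
 * checksums incrementally: the IP header checksum (*ic, IPv4 only) and the
 * TCP/UDP checksum (*pc).  On the outbound path, packets whose TCP/UDP
 * checksum field still holds an un-complemented pseudo-header checksum
 * (checksum computation deferred) are handled separately, and the port
 * change is excluded from that fixup since the pseudo-header does not
 * cover ports.
 */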
2312 static void
2313 pf_change_ap(int dir, struct mbuf *m, struct pf_addr *a, u_int16_t *p,
2314 u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn,
2315 u_int8_t u, sa_family_t af)
2316 {
2317 struct pf_addr ao;
2318 u_int16_t po = *p;
2319
2320 PF_ACPY(&ao, a, af);
2321 PF_ACPY(a, an, af);
2322
2323 *p = pn;
2324
2325 switch (af) {
2326 #if INET
2327 case AF_INET:
2328 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2329 ao.addr16[0], an->addr16[0], 0),
2330 ao.addr16[1], an->addr16[1], 0);
2331 *p = pn;
2332 /*
2333 * If the packet originated from an ALG on the NAT gateway
2334 * (source address is loopback or local), the TCP/UDP checksum
2335 * field contains the pseudo-header checksum, which is not yet
2336 * complemented.
2337 */
2338 if (dir == PF_OUT && m != NULL &&
2339 (m->m_flags & M_PKTHDR) &&
2340 (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) {
2341 /* Pseudo-header checksum does not include ports */
2342 *pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc,
2343 ao.addr16[0], an->addr16[0], u),
2344 ao.addr16[1], an->addr16[1], u);
2345 } else {
2346 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2347 ao.addr16[0], an->addr16[0], u),
2348 ao.addr16[1], an->addr16[1], u),
2349 po, pn, u);
2350 }
2351 break;
2352 #endif /* INET */
2353 #if INET6
2354 case AF_INET6:
2355 /*
2356 * If the packet originated from an ALG on the NAT gateway
2357 * (source address is loopback or local), the TCP/UDP checksum
2358 * field contains the pseudo-header checksum, which is not yet
2359 * complemented.
2360 */
2361 if (dir == PF_OUT && m != NULL &&
2362 (m->m_flags & M_PKTHDR) &&
2363 (m->m_pkthdr.csum_flags & (CSUM_TCPIPV6 | CSUM_UDPIPV6))) {
2364 /* Pseudo-header checksum does not include ports */
2365 *pc = ~pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2366 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2367 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(~*pc,
2368 ao.addr16[0], an->addr16[0], u),
2369 ao.addr16[1], an->addr16[1], u),
2370 ao.addr16[2], an->addr16[2], u),
2371 ao.addr16[3], an->addr16[3], u),
2372 ao.addr16[4], an->addr16[4], u),
2373 ao.addr16[5], an->addr16[5], u),
2374 ao.addr16[6], an->addr16[6], u),
2375 ao.addr16[7], an->addr16[7], u),
2376 po, pn, u);
2377 } else {
2378 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2379 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2380 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2381 ao.addr16[0], an->addr16[0], u),
2382 ao.addr16[1], an->addr16[1], u),
2383 ao.addr16[2], an->addr16[2], u),
2384 ao.addr16[3], an->addr16[3], u),
2385 ao.addr16[4], an->addr16[4], u),
2386 ao.addr16[5], an->addr16[5], u),
2387 ao.addr16[6], an->addr16[6], u),
2388 ao.addr16[7], an->addr16[7], u),
2389 po, pn, u);
2390 }
2391 break;
2392 #endif /* INET6 */
2393 }
2394 }
2395
2396
2397 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
2398 void
2399 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2400 {
2401 u_int32_t ao;
2402
2403 memcpy(&ao, a, sizeof (ao));
2404 memcpy(a, &an, sizeof (u_int32_t));
2405 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2406 ao % 65536, an % 65536, u);
2407 }
2408
2409 #if INET6
2410 static void
2411 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2412 {
2413 struct pf_addr ao;
2414
2415 PF_ACPY(&ao, a, AF_INET6);
2416 PF_ACPY(a, an, AF_INET6);
2417
2418 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2419 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2420 pf_cksum_fixup(pf_cksum_fixup(*c,
2421 ao.addr16[0], an->addr16[0], u),
2422 ao.addr16[1], an->addr16[1], u),
2423 ao.addr16[2], an->addr16[2], u),
2424 ao.addr16[3], an->addr16[3], u),
2425 ao.addr16[4], an->addr16[4], u),
2426 ao.addr16[5], an->addr16[5], u),
2427 ao.addr16[6], an->addr16[6], u),
2428 ao.addr16[7], an->addr16[7], u);
2429 }
2430 #endif /* INET6 */
2431
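/*
 * Rewrite the addresses (and, for TCP/UDP, the port) embedded in an ICMP
 * error's quoted packet as well as the outer header, fixing up the inner
 * protocol checksum, the inner IP header checksum, the ICMP/ICMPv6 checksum
 * and the outer IP header checksum incrementally along the way.
 */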
2432 static void
2433 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2434 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2435 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2436 {
2437 struct pf_addr oia, ooa;
2438
2439 PF_ACPY(&oia, ia, af);
2440 PF_ACPY(&ooa, oa, af);
2441
2442 /* Change inner protocol port, fix inner protocol checksum. */
2443 if (ip != NULL) {
2444 u_int16_t oip = *ip;
2445 u_int32_t opc = 0;
2446
2447 if (pc != NULL)
2448 opc = *pc;
2449 *ip = np;
2450 if (pc != NULL)
2451 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2452 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2453 if (pc != NULL)
2454 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2455 }
2456 /* Change inner ip address, fix inner ip and icmp checksums. */
2457 PF_ACPY(ia, na, af);
2458 switch (af) {
2459 #if INET
2460 case AF_INET: {
2461 u_int32_t oh2c = *h2c;
2462
2463 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2464 oia.addr16[0], ia->addr16[0], 0),
2465 oia.addr16[1], ia->addr16[1], 0);
2466 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2467 oia.addr16[0], ia->addr16[0], 0),
2468 oia.addr16[1], ia->addr16[1], 0);
2469 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2470 break;
2471 }
2472 #endif /* INET */
2473 #if INET6
2474 case AF_INET6:
2475 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2476 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2477 pf_cksum_fixup(pf_cksum_fixup(*ic,
2478 oia.addr16[0], ia->addr16[0], u),
2479 oia.addr16[1], ia->addr16[1], u),
2480 oia.addr16[2], ia->addr16[2], u),
2481 oia.addr16[3], ia->addr16[3], u),
2482 oia.addr16[4], ia->addr16[4], u),
2483 oia.addr16[5], ia->addr16[5], u),
2484 oia.addr16[6], ia->addr16[6], u),
2485 oia.addr16[7], ia->addr16[7], u);
2486 break;
2487 #endif /* INET6 */
2488 }
2489 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
2490 PF_ACPY(oa, na, af);
2491 switch (af) {
2492 #if INET
2493 case AF_INET:
2494 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2495 ooa.addr16[0], oa->addr16[0], 0),
2496 ooa.addr16[1], oa->addr16[1], 0);
2497 break;
2498 #endif /* INET */
2499 #if INET6
2500 case AF_INET6:
2501 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2502 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2503 pf_cksum_fixup(pf_cksum_fixup(*ic,
2504 ooa.addr16[0], oa->addr16[0], u),
2505 ooa.addr16[1], oa->addr16[1], u),
2506 ooa.addr16[2], oa->addr16[2], u),
2507 ooa.addr16[3], oa->addr16[3], u),
2508 ooa.addr16[4], oa->addr16[4], u),
2509 ooa.addr16[5], oa->addr16[5], u),
2510 ooa.addr16[6], oa->addr16[6], u),
2511 ooa.addr16[7], oa->addr16[7], u);
2512 break;
2513 #endif /* INET6 */
2514 }
2515 }
2516
2517
2518 /*
2519 * Need to modulate the sequence numbers in the TCP SACK option
2520 * (credits to Krzysztof Pfaff for report and patch)
2521 */
2522 static int
2523 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2524 struct tcphdr *th, struct pf_state_peer *dst)
2525 {
2526 int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen;
2527 u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
2528 int copyback = 0, i, olen;
2529 struct sackblk sack;
2530
2531 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2532 if (hlen < TCPOLEN_SACKLEN ||
2533 !pf_pull_hdr(m, off + sizeof (*th), opts, hlen, NULL, NULL, pd->af))
2534 return (0);
2535
2536 while (hlen >= TCPOLEN_SACKLEN) {
2537 olen = opt[1];
2538 switch (*opt) {
2539 case TCPOPT_EOL: /* FALLTHROUGH */
2540 case TCPOPT_NOP:
2541 opt++;
2542 hlen--;
2543 break;
2544 case TCPOPT_SACK:
2545 if (olen > hlen)
2546 olen = hlen;
2547 if (olen >= TCPOLEN_SACKLEN) {
2548 for (i = 2; i + TCPOLEN_SACK <= olen;
2549 i += TCPOLEN_SACK) {
2550 memcpy(&sack, &opt[i], sizeof (sack));
2551 pf_change_a(&sack.start, &th->th_sum,
2552 htonl(ntohl(sack.start) -
2553 dst->seqdiff), 0);
2554 pf_change_a(&sack.end, &th->th_sum,
2555 htonl(ntohl(sack.end) -
2556 dst->seqdiff), 0);
2557 memcpy(&opt[i], &sack, sizeof (sack));
2558 }
2559 #ifndef NO_APPLE_EXTENSIONS
2560 copyback = off + sizeof (*th) + thoptlen;
2561 #else
2562 copyback = 1;
2563 #endif
2564 }
2565 /* FALLTHROUGH */
2566 default:
2567 if (olen < 2)
2568 olen = 2;
2569 hlen -= olen;
2570 opt += olen;
2571 }
2572 }
2573
2574 #ifndef NO_APPLE_EXTENSIONS
2575 if (copyback) {
2576 m = pf_lazy_makewritable(pd, m, copyback);
2577 if (!m)
2578 return (-1);
2579 m_copyback(m, off + sizeof (*th), thoptlen, opts);
2580 }
2581 #else
2582 if (copyback)
2583 m_copyback(m, off + sizeof (*th), thoptlen, opts);
2584 #endif
2585 return (copyback);
2586 }
2587
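/*
 * Build and transmit a minimal TCP segment (optionally carrying an MSS
 * option) from scratch; used to answer with RST/ACK and to run the proxy
 * handshakes.  The generated mbuf is tagged PF_TAG_GENERATED (when 'tag'
 * is set) so pf does not re-filter its own packets, and is handed straight
 * to ip_output()/ip6_output().
 */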
2588 static void
2589 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2590 const struct pf_addr *saddr, const struct pf_addr *daddr,
2591 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2592 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2593 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2594 {
2595 #pragma unused(eh, ifp)
2596 struct mbuf *m;
2597 int len, tlen;
2598 #if INET
2599 struct ip *h = NULL;
2600 #endif /* INET */
2601 #if INET6
2602 struct ip6_hdr *h6 = NULL;
2603 #endif /* INET6 */
2604 struct tcphdr *th = NULL;
2605 char *opt;
2606 struct pf_mtag *pf_mtag;
2607
2608 /* maximum segment size tcp option */
2609 tlen = sizeof (struct tcphdr);
2610 if (mss)
2611 tlen += 4;
2612
2613 switch (af) {
2614 #if INET
2615 case AF_INET:
2616 len = sizeof (struct ip) + tlen;
2617 break;
2618 #endif /* INET */
2619 #if INET6
2620 case AF_INET6:
2621 len = sizeof (struct ip6_hdr) + tlen;
2622 break;
2623 #endif /* INET6 */
2624 default:
2625 panic("pf_send_tcp: not AF_INET or AF_INET6!");
2626 return;
2627 }
2628
2629 /* create outgoing mbuf */
2630 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2631 if (m == NULL)
2632 return;
2633
2634 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2635 m_free(m);
2636 return;
2637 }
2638
2639 if (tag)
2640 pf_mtag->flags |= PF_TAG_GENERATED;
2641 pf_mtag->tag = rtag;
2642
2643 if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid))
2644 pf_mtag->rtableid = r->rtableid;
2645
2646 #if ALTQ
2647 if (r != NULL && r->qid) {
2648 pf_mtag->qid = r->qid;
2649 /* add hints for ecn */
2650 pf_mtag->hdr = mtod(m, struct ip *);
2651 }
2652 #endif /* ALTQ */
2653 m->m_data += max_linkhdr;
2654 m->m_pkthdr.len = m->m_len = len;
2655 m->m_pkthdr.rcvif = NULL;
2656 bzero(m->m_data, len);
2657 switch (af) {
2658 #if INET
2659 case AF_INET:
2660 h = mtod(m, struct ip *);
2661
2662 /* IP header fields included in the TCP checksum */
2663 h->ip_p = IPPROTO_TCP;
2664 h->ip_len = htons(tlen);
2665 h->ip_src.s_addr = saddr->v4.s_addr;
2666 h->ip_dst.s_addr = daddr->v4.s_addr;
2667
2668 th = (struct tcphdr *)((caddr_t)h + sizeof (struct ip));
2669 break;
2670 #endif /* INET */
2671 #if INET6
2672 case AF_INET6:
2673 h6 = mtod(m, struct ip6_hdr *);
2674
2675 /* IP header fields included in the TCP checksum */
2676 h6->ip6_nxt = IPPROTO_TCP;
2677 h6->ip6_plen = htons(tlen);
2678 memcpy(&h6->ip6_src, &saddr->v6, sizeof (struct in6_addr));
2679 memcpy(&h6->ip6_dst, &daddr->v6, sizeof (struct in6_addr));
2680
2681 th = (struct tcphdr *)((caddr_t)h6 + sizeof (struct ip6_hdr));
2682 break;
2683 #endif /* INET6 */
2684 }
2685
2686 /* TCP header */
2687 th->th_sport = sport;
2688 th->th_dport = dport;
2689 th->th_seq = htonl(seq);
2690 th->th_ack = htonl(ack);
2691 th->th_off = tlen >> 2;
2692 th->th_flags = flags;
2693 th->th_win = htons(win);
2694
2695 if (mss) {
2696 opt = (char *)(th + 1);
2697 opt[0] = TCPOPT_MAXSEG;
2698 opt[1] = 4;
2699 #if BYTE_ORDER != BIG_ENDIAN
2700 HTONS(mss);
2701 #endif
2702 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2703 }
2704
2705 switch (af) {
2706 #if INET
2707 case AF_INET: {
2708 struct route ro;
2709
2710 /* TCP checksum */
2711 th->th_sum = in_cksum(m, len);
2712
2713 /* Finish the IP header */
2714 h->ip_v = 4;
2715 h->ip_hl = sizeof (*h) >> 2;
2716 h->ip_tos = IPTOS_LOWDELAY;
2717 /*
2718 * ip_output() expects ip_len and ip_off to be in host order.
2719 */
2720 h->ip_len = len;
2721 h->ip_off = (path_mtu_discovery ? IP_DF : 0);
2722 h->ip_ttl = ttl ? ttl : ip_defttl;
2723 h->ip_sum = 0;
2724
2725 bzero(&ro, sizeof (ro));
2726 ip_output(m, NULL, &ro, 0, NULL, NULL);
2727 if (ro.ro_rt != NULL)
2728 rtfree(ro.ro_rt);
2729 break;
2730 }
2731 #endif /* INET */
2732 #if INET6
2733 case AF_INET6: {
2734 struct route_in6 ro6;
2735
2736 /* TCP checksum */
2737 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2738 sizeof (struct ip6_hdr), tlen);
2739
2740 h6->ip6_vfc |= IPV6_VERSION;
2741 h6->ip6_hlim = IPV6_DEFHLIM;
2742
2743 bzero(&ro6, sizeof (ro6));
2744 ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL);
2745 if (ro6.ro_rt != NULL)
2746 rtfree(ro6.ro_rt);
2747 break;
2748 }
2749 #endif /* INET6 */
2750 }
2751 }
2752
2753 static void
2754 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2755 struct pf_rule *r)
2756 {
2757 struct mbuf *m0;
2758 struct pf_mtag *pf_mtag;
2759
2760 m0 = m_copy(m, 0, M_COPYALL);
2761 if (m0 == NULL)
2762 return;
2763
2764 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2765 return;
2766
2767 pf_mtag->flags |= PF_TAG_GENERATED;
2768
2769 if (PF_RTABLEID_IS_VALID(r->rtableid))
2770 pf_mtag->rtableid = r->rtableid;
2771
2772 #if ALTQ
2773 if (r->qid) {
2774 pf_mtag->qid = r->qid;
2775 /* add hints for ecn */
2776 pf_mtag->hdr = mtod(m0, struct ip *);
2777 }
2778 #endif /* ALTQ */
2779 switch (af) {
2780 #if INET
2781 case AF_INET:
2782 icmp_error(m0, type, code, 0, 0);
2783 break;
2784 #endif /* INET */
2785 #if INET6
2786 case AF_INET6:
2787 icmp6_error(m0, type, code, 0);
2788 break;
2789 #endif /* INET6 */
2790 }
2791 }
2792
2793 /*
2794 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2795 * If n is 0, they match if they are equal. If n is != 0, they match if they
2796 * are different.
2797 */
2798 int
2799 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2800 struct pf_addr *b, sa_family_t af)
2801 {
2802 int match = 0;
2803
2804 switch (af) {
2805 #if INET
2806 case AF_INET:
2807 if ((a->addr32[0] & m->addr32[0]) ==
2808 (b->addr32[0] & m->addr32[0]))
2809 match++;
2810 break;
2811 #endif /* INET */
2812 #if INET6
2813 case AF_INET6:
2814 if (((a->addr32[0] & m->addr32[0]) ==
2815 (b->addr32[0] & m->addr32[0])) &&
2816 ((a->addr32[1] & m->addr32[1]) ==
2817 (b->addr32[1] & m->addr32[1])) &&
2818 ((a->addr32[2] & m->addr32[2]) ==
2819 (b->addr32[2] & m->addr32[2])) &&
2820 ((a->addr32[3] & m->addr32[3]) ==
2821 (b->addr32[3] & m->addr32[3])))
2822 match++;
2823 break;
2824 #endif /* INET6 */
2825 }
2826 if (match) {
2827 if (n)
2828 return (0);
2829 else
2830 return (1);
2831 } else {
2832 if (n)
2833 return (1);
2834 else
2835 return (0);
2836 }
2837 }
2838
2839 /*
2840 * Return 1 if b <= a <= e, otherwise return 0.
2841 */
2842 int
2843 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2844 struct pf_addr *a, sa_family_t af)
2845 {
2846 switch (af) {
2847 #if INET
2848 case AF_INET:
2849 if ((a->addr32[0] < b->addr32[0]) ||
2850 (a->addr32[0] > e->addr32[0]))
2851 return (0);
2852 break;
2853 #endif /* INET */
2854 #if INET6
2855 case AF_INET6: {
2856 int i;
2857
2858 /* check a >= b */
2859 for (i = 0; i < 4; ++i)
2860 if (a->addr32[i] > b->addr32[i])
2861 break;
2862 else if (a->addr32[i] < b->addr32[i])
2863 return (0);
2864 /* check a <= e */
2865 for (i = 0; i < 4; ++i)
2866 if (a->addr32[i] < e->addr32[i])
2867 break;
2868 else if (a->addr32[i] > e->addr32[i])
2869 return (0);
2870 break;
2871 }
2872 #endif /* INET6 */
2873 }
2874 return (1);
2875 }
2876
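/*
 * Generic comparison used for port, uid and gid matching.  The PF_OP_*
 * values correspond to pf.conf's relational operators: an exclusive range,
 * an "except range", an inclusive range, equality, inequality and the
 * usual <, <=, > and >= comparisons.
 */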
2877 int
2878 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2879 {
2880 switch (op) {
2881 case PF_OP_IRG:
2882 return ((p > a1) && (p < a2));
2883 case PF_OP_XRG:
2884 return ((p < a1) || (p > a2));
2885 case PF_OP_RRG:
2886 return ((p >= a1) && (p <= a2));
2887 case PF_OP_EQ:
2888 return (p == a1);
2889 case PF_OP_NE:
2890 return (p != a1);
2891 case PF_OP_LT:
2892 return (p < a1);
2893 case PF_OP_LE:
2894 return (p <= a1);
2895 case PF_OP_GT:
2896 return (p > a1);
2897 case PF_OP_GE:
2898 return (p >= a1);
2899 }
2900 return (0); /* never reached */
2901 }
2902
2903 int
2904 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2905 {
2906 #if BYTE_ORDER != BIG_ENDIAN
2907 NTOHS(a1);
2908 NTOHS(a2);
2909 NTOHS(p);
2910 #endif
2911 return (pf_match(op, a1, a2, p));
2912 }
2913
2914 #ifndef NO_APPLE_EXTENSIONS
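/*
 * Apple extension: match a rule's transport criterion against a state's
 * transport identifier, which depends on the protocol: the call id for
 * PPTP-style GRE, the SPI for ESP, and the usual port comparison for
 * TCP/UDP/ICMP/ICMPv6.  With no state xport to compare against, the rule
 * matches by default.
 */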
2915 int
2916 pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
2917 union pf_state_xport *sx)
2918 {
2919 int d = !0;
2920
2921 if (sx) {
2922 switch (proto) {
2923 case IPPROTO_GRE:
2924 if (proto_variant == PF_GRE_PPTP_VARIANT)
2925 d = (rx->call_id == sx->call_id);
2926 break;
2927
2928 case IPPROTO_ESP:
2929 d = (rx->spi == sx->spi);
2930 break;
2931
2932 case IPPROTO_TCP:
2933 case IPPROTO_UDP:
2934 case IPPROTO_ICMP:
2935 case IPPROTO_ICMPV6:
2936 if (rx->range.op)
2937 d = pf_match_port(rx->range.op,
2938 rx->range.port[0], rx->range.port[1],
2939 sx->port);
2940 break;
2941
2942 default:
2943 break;
2944 }
2945 }
2946
2947 return (d);
2948 }
2949 #endif
2950
2951 int
2952 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2953 {
2954 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2955 return (0);
2956 return (pf_match(op, a1, a2, u));
2957 }
2958
2959 int
2960 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2961 {
2962 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2963 return (0);
2964 return (pf_match(op, a1, a2, g));
2965 }
2966
2967 static int
2968 pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_mtag *pf_mtag,
2969 int *tag)
2970 {
2971 #pragma unused(m)
2972 if (*tag == -1)
2973 *tag = pf_mtag->tag;
2974
2975 return ((!r->match_tag_not && r->match_tag == *tag) ||
2976 (r->match_tag_not && r->match_tag != *tag));
2977 }
2978
2979 int
2980 pf_tag_packet(struct mbuf *m, struct pf_mtag *pf_mtag, int tag,
2981 unsigned int rtableid)
2982 {
2983 if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid))
2984 return (0);
2985
2986 if (pf_mtag == NULL && (pf_mtag = pf_get_mtag(m)) == NULL)
2987 return (1);
2988
2989 if (tag > 0)
2990 pf_mtag->tag = tag;
2991 if (PF_RTABLEID_IS_VALID(rtableid))
2992 pf_mtag->rtableid = rtableid;
2993
2994 return (0);
2995 }
2996
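/*
 * Anchor traversal: pf_step_into_anchor() pushes the current ruleset and
 * rule onto pf_anchor_stack and restarts evaluation at the first rule of
 * the anchor's ruleset (or of its first child, for wildcard anchors).
 * pf_step_out_of_anchor() pops frames again, stepping through any remaining
 * wildcard children, propagating the match flag up to the parent and
 * honoring "quick" on the anchor rule.
 */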
2997 void
2998 pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2999 struct pf_rule **r, struct pf_rule **a, int *match)
3000 {
3001 struct pf_anchor_stackframe *f;
3002
3003 (*r)->anchor->match = 0;
3004 if (match)
3005 *match = 0;
3006 if (*depth >= (int)sizeof (pf_anchor_stack) /
3007 (int)sizeof (pf_anchor_stack[0])) {
3008 printf("pf_step_into_anchor: stack overflow\n");
3009 *r = TAILQ_NEXT(*r, entries);
3010 return;
3011 } else if (*depth == 0 && a != NULL)
3012 *a = *r;
3013 f = pf_anchor_stack + (*depth)++;
3014 f->rs = *rs;
3015 f->r = *r;
3016 if ((*r)->anchor_wildcard) {
3017 f->parent = &(*r)->anchor->children;
3018 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
3019 NULL) {
3020 *r = NULL;
3021 return;
3022 }
3023 *rs = &f->child->ruleset;
3024 } else {
3025 f->parent = NULL;
3026 f->child = NULL;
3027 *rs = &(*r)->anchor->ruleset;
3028 }
3029 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3030 }
3031
3032 int
3033 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
3034 struct pf_rule **r, struct pf_rule **a, int *match)
3035 {
3036 struct pf_anchor_stackframe *f;
3037 int quick = 0;
3038
3039 do {
3040 if (*depth <= 0)
3041 break;
3042 f = pf_anchor_stack + *depth - 1;
3043 if (f->parent != NULL && f->child != NULL) {
3044 if (f->child->match ||
3045 (match != NULL && *match)) {
3046 f->r->anchor->match = 1;
3047 *match = 0;
3048 }
3049 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
3050 if (f->child != NULL) {
3051 *rs = &f->child->ruleset;
3052 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3053 if (*r == NULL)
3054 continue;
3055 else
3056 break;
3057 }
3058 }
3059 (*depth)--;
3060 if (*depth == 0 && a != NULL)
3061 *a = NULL;
3062 *rs = f->rs;
3063 if (f->r->anchor->match || (match != NULL && *match))
3064 quick = f->r->quick;
3065 *r = TAILQ_NEXT(f->r, entries);
3066 } while (*r == NULL);
3067
3068 return (quick);
3069 }
3070
3071 #if INET6
3072 void
3073 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3074 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3075 {
3076 switch (af) {
3077 #if INET
3078 case AF_INET:
3079 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3080 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3081 break;
3082 #endif /* INET */
3083 case AF_INET6:
3084 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3085 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3086 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3087 ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
3088 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3089 ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
3090 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3091 ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
3092 break;
3093 }
3094 }
3095
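/*
 * Increment an address by one, treating it as a big-endian integer: for
 * IPv6 the carry propagates across the four 32-bit words.  Round-robin and
 * random pools use this to step their counter through an address block.
 */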
3096 void
3097 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3098 {
3099 switch (af) {
3100 #if INET
3101 case AF_INET:
3102 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3103 break;
3104 #endif /* INET */
3105 case AF_INET6:
3106 if (addr->addr32[3] == 0xffffffff) {
3107 addr->addr32[3] = 0;
3108 if (addr->addr32[2] == 0xffffffff) {
3109 addr->addr32[2] = 0;
3110 if (addr->addr32[1] == 0xffffffff) {
3111 addr->addr32[1] = 0;
3112 addr->addr32[0] =
3113 htonl(ntohl(addr->addr32[0]) + 1);
3114 } else
3115 addr->addr32[1] =
3116 htonl(ntohl(addr->addr32[1]) + 1);
3117 } else
3118 addr->addr32[2] =
3119 htonl(ntohl(addr->addr32[2]) + 1);
3120 } else
3121 addr->addr32[3] =
3122 htonl(ntohl(addr->addr32[3]) + 1);
3123 break;
3124 }
3125 }
3126 #endif /* INET6 */
3127
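/*
 * mix() is the 96-bit mixing step of a Jenkins-style lookup hash (see the
 * note above pf_hash() below); it scrambles three 32-bit words so that the
 * keyed hash of an address spreads well across the pool.
 */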
3128 #define mix(a, b, c) \
3129 do { \
3130 a -= b; a -= c; a ^= (c >> 13); \
3131 b -= c; b -= a; b ^= (a << 8); \
3132 c -= a; c -= b; c ^= (b >> 13); \
3133 a -= b; a -= c; a ^= (c >> 12); \
3134 b -= c; b -= a; b ^= (a << 16); \
3135 c -= a; c -= b; c ^= (b >> 5); \
3136 a -= b; a -= c; a ^= (c >> 3); \
3137 b -= c; b -= a; b ^= (a << 10); \
3138 c -= a; c -= b; c ^= (b >> 15); \
3139 } while (0)
3140
3141 /*
3142 * hash function based on bridge_hash in if_bridge.c
3143 */
3144 static void
3145 pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
3146 struct pf_poolhashkey *key, sa_family_t af)
3147 {
3148 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
3149
3150 switch (af) {
3151 #if INET
3152 case AF_INET:
3153 a += inaddr->addr32[0];
3154 b += key->key32[1];
3155 mix(a, b, c);
3156 hash->addr32[0] = c + key->key32[2];
3157 break;
3158 #endif /* INET */
3159 #if INET6
3160 case AF_INET6:
3161 a += inaddr->addr32[0];
3162 b += inaddr->addr32[2];
3163 mix(a, b, c);
3164 hash->addr32[0] = c;
3165 a += inaddr->addr32[1];
3166 b += inaddr->addr32[3];
3167 c += key->key32[1];
3168 mix(a, b, c);
3169 hash->addr32[1] = c;
3170 a += inaddr->addr32[2];
3171 b += inaddr->addr32[1];
3172 c += key->key32[2];
3173 mix(a, b, c);
3174 hash->addr32[2] = c;
3175 a += inaddr->addr32[3];
3176 b += inaddr->addr32[0];
3177 c += key->key32[3];
3178 mix(a, b, c);
3179 hash->addr32[3] = c;
3180 break;
3181 #endif /* INET6 */
3182 }
3183 }
3184
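/*
 * Pick the translation address from a rule's pool.  Sticky-address source
 * tracking is consulted first so an existing source node keeps its mapping;
 * otherwise the address is chosen according to the pool type: none (single
 * address), bitmask, random, source-hash or round-robin (the only type
 * allowed for table-backed pools).  Returns 0 with the result in *naddr,
 * or 1 if no usable address of the requested family is available.
 */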
3185 static int
3186 pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
3187 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
3188 {
3189 unsigned char hash[16];
3190 struct pf_pool *rpool = &r->rpool;
3191 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
3192 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
3193 struct pf_pooladdr *acur = rpool->cur;
3194 struct pf_src_node k;
3195
3196 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
3197 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
3198 k.af = af;
3199 PF_ACPY(&k.addr, saddr, af);
3200 if (r->rule_flag & PFRULE_RULESRCTRACK ||
3201 r->rpool.opts & PF_POOL_STICKYADDR)
3202 k.rule.ptr = r;
3203 else
3204 k.rule.ptr = NULL;
3205 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
3206 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
3207 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
3208 PF_ACPY(naddr, &(*sn)->raddr, af);
3209 if (pf_status.debug >= PF_DEBUG_MISC) {
3210 printf("pf_map_addr: src tracking maps ");
3211 pf_print_host(&k.addr, 0, af);
3212 printf(" to ");
3213 pf_print_host(naddr, 0, af);
3214 printf("\n");
3215 }
3216 return (0);
3217 }
3218 }
3219
3220 if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
3221 return (1);
3222 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3223 switch (af) {
3224 #if INET
3225 case AF_INET:
3226 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
3227 (rpool->opts & PF_POOL_TYPEMASK) !=
3228 PF_POOL_ROUNDROBIN)
3229 return (1);
3230 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
3231 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
3232 break;
3233 #endif /* INET */
3234 #if INET6
3235 case AF_INET6:
3236 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
3237 (rpool->opts & PF_POOL_TYPEMASK) !=
3238 PF_POOL_ROUNDROBIN)
3239 return (1);
3240 raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
3241 rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
3242 break;
3243 #endif /* INET6 */
3244 }
3245 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3246 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
3247 return (1); /* unsupported */
3248 } else {
3249 raddr = &rpool->cur->addr.v.a.addr;
3250 rmask = &rpool->cur->addr.v.a.mask;
3251 }
3252
3253 switch (rpool->opts & PF_POOL_TYPEMASK) {
3254 case PF_POOL_NONE:
3255 PF_ACPY(naddr, raddr, af);
3256 break;
3257 case PF_POOL_BITMASK:
3258 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
3259 break;
3260 case PF_POOL_RANDOM:
3261 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
3262 switch (af) {
3263 #if INET
3264 case AF_INET:
3265 rpool->counter.addr32[0] = htonl(random());
3266 break;
3267 #endif /* INET */
3268 #if INET6
3269 case AF_INET6:
3270 if (rmask->addr32[3] != 0xffffffff)
3271 rpool->counter.addr32[3] =
3272 htonl(random());
3273 else
3274 break;
3275 if (rmask->addr32[2] != 0xffffffff)
3276 rpool->counter.addr32[2] =
3277 htonl(random());
3278 else
3279 break;
3280 if (rmask->addr32[1] != 0xffffffff)
3281 rpool->counter.addr32[1] =
3282 htonl(random());
3283 else
3284 break;
3285 if (rmask->addr32[0] != 0xffffffff)
3286 rpool->counter.addr32[0] =
3287 htonl(random());
3288 break;
3289 #endif /* INET6 */
3290 }
3291 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
3292 PF_ACPY(init_addr, naddr, af);
3293
3294 } else {
3295 PF_AINC(&rpool->counter, af);
3296 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
3297 }
3298 break;
3299 case PF_POOL_SRCHASH:
3300 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
3301 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
3302 break;
3303 case PF_POOL_ROUNDROBIN:
3304 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3305 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
3306 &rpool->tblidx, &rpool->counter,
3307 &raddr, &rmask, af))
3308 goto get_addr;
3309 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3310 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
3311 &rpool->tblidx, &rpool->counter,
3312 &raddr, &rmask, af))
3313 goto get_addr;
3314 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
3315 goto get_addr;
3316
3317 try_next:
3318 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
3319 rpool->cur = TAILQ_FIRST(&rpool->list);
3320 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3321 rpool->tblidx = -1;
3322 if (pfr_pool_get(rpool->cur->addr.p.tbl,
3323 &rpool->tblidx, &rpool->counter,
3324 &raddr, &rmask, af)) {
3325 /* table contains no address of type 'af' */
3326 if (rpool->cur != acur)
3327 goto try_next;
3328 return (1);
3329 }
3330 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3331 rpool->tblidx = -1;
3332 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
3333 &rpool->tblidx, &rpool->counter,
3334 &raddr, &rmask, af)) {
3335 /* table contains no address of type 'af' */
3336 if (rpool->cur != acur)
3337 goto try_next;
3338 return (1);
3339 }
3340 } else {
3341 raddr = &rpool->cur->addr.v.a.addr;
3342 rmask = &rpool->cur->addr.v.a.mask;
3343 PF_ACPY(&rpool->counter, raddr, af);
3344 }
3345
3346 get_addr:
3347 PF_ACPY(naddr, &rpool->counter, af);
3348 if (init_addr != NULL && PF_AZERO(init_addr, af))
3349 PF_ACPY(init_addr, naddr, af);
3350 PF_AINC(&rpool->counter, af);
3351 break;
3352 }
3353 if (*sn != NULL)
3354 PF_ACPY(&(*sn)->raddr, naddr, af);
3355
3356 if (pf_status.debug >= PF_DEBUG_MISC &&
3357 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
3358 printf("pf_map_addr: selected address ");
3359 pf_print_host(naddr, 0, af);
3360 printf("\n");
3361 }
3362
3363 return (0);
3364 }
3365
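/*
 * Choose the NAT source port: first map the address via pf_map_addr(), then
 * search the rule's proxy port range for a gateway port that does not
 * collide with an existing state, starting at a random offset and scanning
 * up and then down from there.  The Apple extensions pin the IKE source
 * port, honor the rule's exterior-mapping options for UDP, and reuse an
 * existing binding for TCP sessions from the same source address and port.
 */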
3366 #ifndef NO_APPLE_EXTENSIONS
3367 static int
3368 pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
3369 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3370 union pf_state_xport *dxport, struct pf_addr *naddr,
3371 union pf_state_xport *nxport, struct pf_src_node **sn)
3372 #else
3373 int
3374 pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
3375 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
3376 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
3377 struct pf_src_node **sn)
3378 #endif
3379 {
3380 #pragma unused(kif)
3381 struct pf_state_key_cmp key;
3382 struct pf_addr init_addr;
3383 #ifndef NO_APPLE_EXTENSIONS
3384 unsigned int cut;
3385 sa_family_t af = pd->af;
3386 u_int8_t proto = pd->proto;
3387 unsigned int low = r->rpool.proxy_port[0];
3388 unsigned int high = r->rpool.proxy_port[1];
3389 #else
3390 u_int16_t cut;
3391 #endif
3392
3393 bzero(&init_addr, sizeof (init_addr));
3394 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3395 return (1);
3396
3397 if (proto == IPPROTO_ICMP) {
3398 low = 1;
3399 high = 65535;
3400 }
3401
3402 #ifndef NO_APPLE_EXTENSIONS
3403 if (!nxport)
3404 return (0); /* No output necessary. */
3405
3406 /*--- Special mapping rules for UDP ---*/
3407 if (proto == IPPROTO_UDP) {
3408
3409 /*--- Never float IKE source port ---*/
3410 if (ntohs(sxport->port) == PF_IKE_PORT) {
3411 nxport->port = sxport->port;
3412 return (0);
3413 }
3414
3415 /*--- Apply exterior mapping options ---*/
3416 if (r->extmap > PF_EXTMAP_APD) {
3417 struct pf_state *s;
3418
3419 TAILQ_FOREACH(s, &state_list, entry_list) {
3420 struct pf_state_key *sk = s->state_key;
3421 if (!sk)
3422 continue;
3423 if (s->nat_rule.ptr != r)
3424 continue;
3425 if (sk->proto != IPPROTO_UDP || sk->af != af)
3426 continue;
3427 if (sk->lan.xport.port != sxport->port)
3428 continue;
3429 if (PF_ANEQ(&sk->lan.addr, saddr, af))
3430 continue;
3431 if (r->extmap < PF_EXTMAP_EI &&
3432 PF_ANEQ(&sk->ext.addr, daddr, af))
3433 continue;
3434
3435 nxport->port = sk->gwy.xport.port;
3436 return (0);
3437 }
3438 }
3439 } else if (proto == IPPROTO_TCP) {
3440 struct pf_state *s;
3441 /*
3442 * APPLE MODIFICATION: <rdar://problem/6546358>
3443 * Fix allows NAT to use a single binding for TCP sessions
3444 * with the same source IP address and source port
3445 */
3446 TAILQ_FOREACH(s, &state_list, entry_list) {
3447 struct pf_state_key *sk = s->state_key;
3448 if (!sk)
3449 continue;
3450 if (s->nat_rule.ptr != r)
3451 continue;
3452 if (sk->proto != IPPROTO_TCP || sk->af != af)
3453 continue;
3454 if (sk->lan.xport.port != sxport->port)
3455 continue;
3456 if (!(PF_AEQ(&sk->lan.addr, saddr, af)))
3457 continue;
3458 nxport->port = sk->gwy.xport.port;
3459 return (0);
3460 }
3461 }
3462 #endif
3463 do {
3464 key.af = af;
3465 key.proto = proto;
3466 PF_ACPY(&key.ext.addr, daddr, key.af);
3467 PF_ACPY(&key.gwy.addr, naddr, key.af);
3468 #ifndef NO_APPLE_EXTENSIONS
3469 switch (proto) {
3470 case IPPROTO_UDP:
3471 key.proto_variant = r->extfilter;
3472 break;
3473 default:
3474 key.proto_variant = 0;
3475 break;
3476 }
3477 if (dxport)
3478 key.ext.xport = *dxport;
3479 else
3480 memset(&key.ext.xport, 0, sizeof (key.ext.xport));
3481 #else
3482 key.ext.port = dport;
3483 #endif
3484 /*
3485 * Port search: start at a random port and step through the
3486 * range, similar to the port loop in in_pcbbind.
3487 */
3488 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
3489 proto == IPPROTO_ICMP)) {
3490 #ifndef NO_APPLE_EXTENSIONS
3491 if (dxport)
3492 key.gwy.xport = *dxport;
3493 else
3494 memset(&key.gwy.xport, 0,
3495 sizeof (key.gwy.xport));
3496 #else
3497 key.gwy.port = dport;
3498 #endif
3499 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3500 return (0);
3501 } else if (low == 0 && high == 0) {
3502 #ifndef NO_APPLE_EXTENSIONS
3503 key.gwy.xport = *nxport;
3504 #else
3505 key.gwy.port = *nport;
3506 #endif
3507 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3508 return (0);
3509 } else if (low == high) {
3510 #ifndef NO_APPLE_EXTENSIONS
3511 key.gwy.xport.port = htons(low);
3512 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
3513 nxport->port = htons(low);
3514 return (0);
3515 }
3516 #else
3517 key.gwy.port = htons(low);
3518 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
3519 *nport = htons(low);
3520 return (0);
3521 }
3522 #endif
3523 } else {
3524 #ifndef NO_APPLE_EXTENSIONS
3525 unsigned int tmp;
3526 #else
3527 u_int16_t tmp;
3528 #endif
3529 if (low > high) {
3530 tmp = low;
3531 low = high;
3532 high = tmp;
3533 }
3534 /* low < high */
3535 cut = htonl(random()) % (1 + high - low) + low;
3536 /* low <= cut <= high */
3537 for (tmp = cut; tmp <= high; ++(tmp)) {
3538 #ifndef NO_APPLE_EXTENSIONS
3539 key.gwy.xport.port = htons(tmp);
3540 if (pf_find_state_all(&key, PF_IN, NULL) ==
3541 NULL) {
3542 nxport->port = htons(tmp);
3543 return (0);
3544 }
3545 #else
3546 key.gwy.port = htons(tmp);
3547 if (pf_find_state_all(&key, PF_IN, NULL) ==
3548 NULL) {
3549 *nport = htons(tmp);
3550 return (0);
3551 }
3552 #endif
3553 }
3554 for (tmp = cut - 1; tmp >= low; --(tmp)) {
3555 #ifndef NO_APPLE_EXTENSIONS
3556 key.gwy.xport.port = htons(tmp);
3557 if (pf_find_state_all(&key, PF_IN, NULL) ==
3558 NULL) {
3559 nxport->port = htons(tmp);
3560 return (0);
3561 }
3562 #else
3563 key.gwy.port = htons(tmp);
3564 if (pf_find_state_all(&key, PF_IN, NULL) ==
3565 NULL) {
3566 *nport = htons(tmp);
3567 return (0);
3568 }
3569 #endif
3570 }
3571 }
3572
3573 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
3574 case PF_POOL_RANDOM:
3575 case PF_POOL_ROUNDROBIN:
3576 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3577 return (1);
3578 break;
3579 case PF_POOL_NONE:
3580 case PF_POOL_SRCHASH:
3581 case PF_POOL_BITMASK:
3582 default:
3583 return (1);
3584 }
3585 } while (!PF_AEQ(&init_addr, naddr, af));
3586
3587 return (1); /* none available */
3588 }
3589
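/*
 * Scan one translation ruleset (NAT, BINAT or RDR, selected by rs_num) and
 * return the first matching rule, descending into anchors as needed and
 * using the precomputed skip steps to jump over rules that cannot match.
 * A matching "no nat"/"no rdr"/"no binat" rule makes the lookup return NULL.
 */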
3590 #ifndef NO_APPLE_EXTENSIONS
3591 static struct pf_rule *
3592 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
3593 int direction, struct pfi_kif *kif, struct pf_addr *saddr,
3594 union pf_state_xport *sxport, struct pf_addr *daddr,
3595 union pf_state_xport *dxport, int rs_num)
3596 #else
3597 struct pf_rule *
3598 pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
3599 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
3600 struct pf_addr *daddr, u_int16_t dport, int rs_num)
3601 #endif
3602 {
3603 struct pf_rule *r, *rm = NULL;
3604 struct pf_ruleset *ruleset = NULL;
3605 int tag = -1;
3606 unsigned int rtableid = IFSCOPE_NONE;
3607 int asd = 0;
3608
3609 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
3610 while (r && rm == NULL) {
3611 struct pf_rule_addr *src = NULL, *dst = NULL;
3612 struct pf_addr_wrap *xdst = NULL;
3613 #ifndef NO_APPLE_EXTENSIONS
3614 struct pf_addr_wrap *xsrc = NULL;
3615 union pf_rule_xport rdrxport;
3616 #endif
3617
3618 if (r->action == PF_BINAT && direction == PF_IN) {
3619 src = &r->dst;
3620 if (r->rpool.cur != NULL)
3621 xdst = &r->rpool.cur->addr;
3622 #ifndef NO_APPLE_EXTENSIONS
3623 } else if (r->action == PF_RDR && direction == PF_OUT) {
3624 dst = &r->src;
3625 src = &r->dst;
3626 if (r->rpool.cur != NULL) {
3627 rdrxport.range.op = PF_OP_EQ;
3628 rdrxport.range.port[0] =
3629 htons(r->rpool.proxy_port[0]);
3630 xsrc = &r->rpool.cur->addr;
3631 }
3632 #endif
3633 } else {
3634 src = &r->src;
3635 dst = &r->dst;
3636 }
3637
3638 r->evaluations++;
3639 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3640 r = r->skip[PF_SKIP_IFP].ptr;
3641 else if (r->direction && r->direction != direction)
3642 r = r->skip[PF_SKIP_DIR].ptr;
3643 else if (r->af && r->af != pd->af)
3644 r = r->skip[PF_SKIP_AF].ptr;
3645 else if (r->proto && r->proto != pd->proto)
3646 r = r->skip[PF_SKIP_PROTO].ptr;
3647 #ifndef NO_APPLE_EXTENSIONS
3648 else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL))
3649 r = TAILQ_NEXT(r, entries);
3650 else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
3651 src->neg, kif))
3652 r = TAILQ_NEXT(r, entries);
3653 else if (xsrc && (!rdrxport.range.port[0] ||
3654 !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
3655 sxport)))
3656 r = TAILQ_NEXT(r, entries);
3657 else if (!xsrc && !pf_match_xport(r->proto,
3658 r->proto_variant, &src->xport, sxport))
3659 #else
3660 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
3661 src->neg, kif))
3662 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
3663 PF_SKIP_DST_ADDR].ptr;
3664 else if (src->port_op && !pf_match_port(src->port_op,
3665 src->port[0], src->port[1], sport))
3666 #endif
3667 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
3668 PF_SKIP_DST_PORT].ptr;
3669 else if (dst != NULL &&
3670 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
3671 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3672 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
3673 0, NULL))
3674 r = TAILQ_NEXT(r, entries);
3675 #ifndef NO_APPLE_EXTENSIONS
3676 else if (dst && !pf_match_xport(r->proto, r->proto_variant,
3677 &dst->xport, dxport))
3678 #else
3679 else if (dst != NULL && dst->port_op &&
3680 !pf_match_port(dst->port_op, dst->port[0],
3681 dst->port[1], dport))
3682 #endif
3683 r = r->skip[PF_SKIP_DST_PORT].ptr;
3684 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
3685 r = TAILQ_NEXT(r, entries);
3686 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
3687 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
3688 off, pd->hdr.tcp), r->os_fingerprint)))
3689 r = TAILQ_NEXT(r, entries);
3690 else {
3691 if (r->tag)
3692 tag = r->tag;
3693 if (PF_RTABLEID_IS_VALID(r->rtableid))
3694 rtableid = r->rtableid;
3695 if (r->anchor == NULL) {
3696 rm = r;
3697 } else
3698 pf_step_into_anchor(&asd, &ruleset, rs_num,
3699 &r, NULL, NULL);
3700 }
3701 if (r == NULL)
3702 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
3703 NULL, NULL);
3704 }
3705 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid))
3706 return (NULL);
3707 if (rm != NULL && (rm->action == PF_NONAT ||
3708 rm->action == PF_NORDR || rm->action == PF_NOBINAT))
3709 return (NULL);
3710 return (rm);
3711 }
3712
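/*
 * Find the translation rule for a packet and compute the translated
 * address/port.  In the Apple-extensions build, outbound packets are
 * checked against BINAT, then RDR, then NAT rules; inbound packets against
 * RDR, then BINAT.  Depending on the action, the new address comes from
 * pf_get_sport() (NAT), the binat mapping, or the redirect pool, with the
 * target port taken from the rule's port or proxy-port range.
 */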
3713 #ifndef NO_APPLE_EXTENSIONS
3714 static struct pf_rule *
3715 pf_get_translation_aux(struct pf_pdesc *pd, struct mbuf *m, int off,
3716 int direction, struct pfi_kif *kif, struct pf_src_node **sn,
3717 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3718 union pf_state_xport *dxport, struct pf_addr *naddr,
3719 union pf_state_xport *nxport)
3720 #else
3721 struct pf_rule *
3722 pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
3723 struct pfi_kif *kif, struct pf_src_node **sn,
3724 struct pf_addr *saddr, u_int16_t sport,
3725 struct pf_addr *daddr, u_int16_t dport,
3726 struct pf_addr *naddr, u_int16_t *nport)
3727 #endif
3728 {
3729 struct pf_rule *r = NULL;
3730
3731 #ifndef NO_APPLE_EXTENSIONS
3732 if (direction == PF_OUT) {
3733 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3734 sxport, daddr, dxport, PF_RULESET_BINAT);
3735 if (r == NULL)
3736 r = pf_match_translation(pd, m, off, direction, kif,
3737 saddr, sxport, daddr, dxport, PF_RULESET_RDR);
3738 if (r == NULL)
3739 r = pf_match_translation(pd, m, off, direction, kif,
3740 saddr, sxport, daddr, dxport, PF_RULESET_NAT);
3741 } else {
3742 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3743 sxport, daddr, dxport, PF_RULESET_RDR);
3744 if (r == NULL)
3745 r = pf_match_translation(pd, m, off, direction, kif,
3746 saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
3747 }
3748 #else
3749 if (direction == PF_OUT) {
3750 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3751 sport, daddr, dport, PF_RULESET_BINAT);
3752 if (r == NULL)
3753 r = pf_match_translation(pd, m, off, direction, kif,
3754 saddr, sport, daddr, dport, PF_RULESET_NAT);
3755 } else {
3756 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3757 sport, daddr, dport, PF_RULESET_RDR);
3758 if (r == NULL)
3759 r = pf_match_translation(pd, m, off, direction, kif,
3760 saddr, sport, daddr, dport, PF_RULESET_BINAT);
3761 }
3762 #endif
3763
3764 if (r != NULL) {
3765 switch (r->action) {
3766 case PF_NONAT:
3767 case PF_NOBINAT:
3768 case PF_NORDR:
3769 return (NULL);
3770 case PF_NAT:
3771 #ifndef NO_APPLE_EXTENSIONS
3772 if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
3773 dxport, naddr, nxport, sn)) {
3774 #else
3775 if (pf_get_sport(pd->af, pd->proto, r, saddr,
3776 daddr, dport, naddr, nport, r->rpool.proxy_port[0],
3777 r->rpool.proxy_port[1], sn)) {
3778 #endif
3779 DPFPRINTF(PF_DEBUG_MISC,
3780 ("pf: NAT proxy port allocation "
3781 "(%u-%u) failed\n",
3782 r->rpool.proxy_port[0],
3783 r->rpool.proxy_port[1]));
3784 return (NULL);
3785 }
3786 break;
3787 case PF_BINAT:
3788 switch (direction) {
3789 case PF_OUT:
3790 if (r->rpool.cur->addr.type ==
3791 PF_ADDR_DYNIFTL) {
3792 switch (pd->af) {
3793 #if INET
3794 case AF_INET:
3795 if (r->rpool.cur->addr.p.dyn->
3796 pfid_acnt4 < 1)
3797 return (NULL);
3798 PF_POOLMASK(naddr,
3799 &r->rpool.cur->addr.p.dyn->
3800 pfid_addr4,
3801 &r->rpool.cur->addr.p.dyn->
3802 pfid_mask4,
3803 saddr, AF_INET);
3804 break;
3805 #endif /* INET */
3806 #if INET6
3807 case AF_INET6:
3808 if (r->rpool.cur->addr.p.dyn->
3809 pfid_acnt6 < 1)
3810 return (NULL);
3811 PF_POOLMASK(naddr,
3812 &r->rpool.cur->addr.p.dyn->
3813 pfid_addr6,
3814 &r->rpool.cur->addr.p.dyn->
3815 pfid_mask6,
3816 saddr, AF_INET6);
3817 break;
3818 #endif /* INET6 */
3819 }
3820 } else {
3821 PF_POOLMASK(naddr,
3822 &r->rpool.cur->addr.v.a.addr,
3823 &r->rpool.cur->addr.v.a.mask,
3824 saddr, pd->af);
3825 }
3826 break;
3827 case PF_IN:
3828 if (r->src.addr.type == PF_ADDR_DYNIFTL) {
3829 switch (pd->af) {
3830 #if INET
3831 case AF_INET:
3832 if (r->src.addr.p.dyn->
3833 pfid_acnt4 < 1)
3834 return (NULL);
3835 PF_POOLMASK(naddr,
3836 &r->src.addr.p.dyn->
3837 pfid_addr4,
3838 &r->src.addr.p.dyn->
3839 pfid_mask4,
3840 daddr, AF_INET);
3841 break;
3842 #endif /* INET */
3843 #if INET6
3844 case AF_INET6:
3845 if (r->src.addr.p.dyn->
3846 pfid_acnt6 < 1)
3847 return (NULL);
3848 PF_POOLMASK(naddr,
3849 &r->src.addr.p.dyn->
3850 pfid_addr6,
3851 &r->src.addr.p.dyn->
3852 pfid_mask6,
3853 daddr, AF_INET6);
3854 break;
3855 #endif /* INET6 */
3856 }
3857 } else
3858 PF_POOLMASK(naddr,
3859 &r->src.addr.v.a.addr,
3860 &r->src.addr.v.a.mask, daddr,
3861 pd->af);
3862 break;
3863 }
3864 break;
3865 case PF_RDR: {
3866 #ifndef NO_APPLE_EXTENSIONS
3867 switch (direction) {
3868 case PF_OUT:
3869 if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
3870 switch (pd->af) {
3871 #if INET
3872 case AF_INET:
3873 if (r->dst.addr.p.dyn->
3874 pfid_acnt4 < 1)
3875 return (NULL);
3876 PF_POOLMASK(naddr,
3877 &r->dst.addr.p.dyn->
3878 pfid_addr4,
3879 &r->dst.addr.p.dyn->
3880 pfid_mask4,
3881 daddr, AF_INET);
3882 break;
3883 #endif /* INET */
3884 #if INET6
3885 case AF_INET6:
3886 if (r->dst.addr.p.dyn->
3887 pfid_acnt6 < 1)
3888 return (NULL);
3889 PF_POOLMASK(naddr,
3890 &r->dst.addr.p.dyn->
3891 pfid_addr6,
3892 &r->dst.addr.p.dyn->
3893 pfid_mask6,
3894 daddr, AF_INET6);
3895 break;
3896 #endif /* INET6 */
3897 }
3898 } else {
3899 PF_POOLMASK(naddr,
3900 &r->dst.addr.v.a.addr,
3901 &r->dst.addr.v.a.mask,
3902 daddr, pd->af);
3903 }
3904 if (nxport && r->dst.xport.range.port[0])
3905 nxport->port =
3906 r->dst.xport.range.port[0];
3907 break;
3908 case PF_IN:
3909 if (pf_map_addr(pd->af, r, saddr,
3910 naddr, NULL, sn))
3911 return (NULL);
3912 if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
3913 PF_POOL_BITMASK)
3914 PF_POOLMASK(naddr, naddr,
3915 &r->rpool.cur->addr.v.a.mask, daddr,
3916 pd->af);
3917
3918 if (nxport && dxport) {
3919 if (r->rpool.proxy_port[1]) {
3920 u_int32_t tmp_nport;
3921
3922 tmp_nport =
3923 ((ntohs(dxport->port) -
3924 ntohs(r->dst.xport.range.
3925 port[0])) %
3926 (r->rpool.proxy_port[1] -
3927 r->rpool.proxy_port[0] +
3928 1)) + r->rpool.proxy_port[0];
3929
3930 /* wrap around if necessary */
3931 if (tmp_nport > 65535)
3932 tmp_nport -= 65535;
3933 nxport->port =
3934 htons((u_int16_t)tmp_nport);
3935 } else if (r->rpool.proxy_port[0]) {
3936 nxport->port = htons(r->rpool.
3937 proxy_port[0]);
3938 }
3939 }
3940 break;
3941 }
3942 #else
3943 if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
3944 return (NULL);
3945 if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
3946 PF_POOL_BITMASK)
3947 PF_POOLMASK(naddr, naddr,
3948 &r->rpool.cur->addr.v.a.mask, daddr,
3949 pd->af);
3950
3951 if (r->rpool.proxy_port[1]) {
3952 u_int32_t tmp_nport;
3953
3954 tmp_nport = ((ntohs(dport) -
3955 ntohs(r->dst.port[0])) %
3956 (r->rpool.proxy_port[1] -
3957 r->rpool.proxy_port[0] + 1)) +
3958 r->rpool.proxy_port[0];
3959
3960 /* wrap around if necessary */
3961 if (tmp_nport > 65535)
3962 tmp_nport -= 65535;
3963 *nport = htons((u_int16_t)tmp_nport);
3964 } else if (r->rpool.proxy_port[0])
3965 *nport = htons(r->rpool.proxy_port[0]);
3966 #endif
3967 break;
3968 }
3969 default:
3970 return (NULL);
3971 }
3972 }
3973
3974 return (r);
3975 }
3976
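/*
 * Map the packet to a local socket in order to learn the owning uid/gid
 * for user/group rule matching.  The PCB hash is probed with the exact
 * endpoints first and then with wildcard matching; for IPv4, v4-mapped
 * IPv6 sockets are tried as well.  Returns 1 on success, -1 if no socket
 * was found or the protocol is not TCP/UDP.
 */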
3977 int
3978 pf_socket_lookup(int direction, struct pf_pdesc *pd)
3979 {
3980 struct pf_addr *saddr, *daddr;
3981 u_int16_t sport, dport;
3982 struct inpcbinfo *pi;
3983 int inp = 0;
3984
3985 if (pd == NULL)
3986 return (-1);
3987 pd->lookup.uid = UID_MAX;
3988 pd->lookup.gid = GID_MAX;
3989 pd->lookup.pid = NO_PID;
3990
3991 switch (pd->proto) {
3992 case IPPROTO_TCP:
3993 if (pd->hdr.tcp == NULL)
3994 return (-1);
3995 sport = pd->hdr.tcp->th_sport;
3996 dport = pd->hdr.tcp->th_dport;
3997 pi = &tcbinfo;
3998 break;
3999 case IPPROTO_UDP:
4000 if (pd->hdr.udp == NULL)
4001 return (-1);
4002 sport = pd->hdr.udp->uh_sport;
4003 dport = pd->hdr.udp->uh_dport;
4004 pi = &udbinfo;
4005 break;
4006 default:
4007 return (-1);
4008 }
4009 if (direction == PF_IN) {
4010 saddr = pd->src;
4011 daddr = pd->dst;
4012 } else {
4013 u_int16_t p;
4014
4015 p = sport;
4016 sport = dport;
4017 dport = p;
4018 saddr = pd->dst;
4019 daddr = pd->src;
4020 }
4021 switch (pd->af) {
4022 #if INET
4023 case AF_INET:
4024 inp = in_pcblookup_hash_exists(pi, saddr->v4, sport, daddr->v4, dport,
4025 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
4026 #if INET6
4027 if (inp == 0) {
4028 struct in6_addr s6, d6;
4029
4030 memset(&s6, 0, sizeof (s6));
4031 s6.s6_addr16[5] = htons(0xffff);
4032 memcpy(&s6.s6_addr32[3], &saddr->v4,
4033 sizeof (saddr->v4));
4034
4035 memset(&d6, 0, sizeof (d6));
4036 d6.s6_addr16[5] = htons(0xffff);
4037 memcpy(&d6.s6_addr32[3], &daddr->v4,
4038 sizeof (daddr->v4));
4039
4040 inp = in6_pcblookup_hash_exists(pi, &s6, sport,
4041 &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
4042 if (inp == 0) {
4043 inp = in_pcblookup_hash_exists(pi, saddr->v4, sport,
4044 daddr->v4, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL);
4045 if (inp == 0) {
4046 inp = in6_pcblookup_hash_exists(pi, &s6, sport,
4047 &d6, dport, INPLOOKUP_WILDCARD,
4048 &pd->lookup.uid, &pd->lookup.gid, NULL);
4049 if (inp == 0)
4050 return (-1);
4051 }
4052 }
4053 }
4054 #else
4055 if (inp == 0) {
4056 inp = in_pcblookup_hash_exists(pi, saddr->v4, sport,
4057 daddr->v4, dport, INPLOOKUP_WILDCARD,
4058 &pd->lookup.uid, &pd->lookup.gid, NULL);
4059 if (inp == 0)
4060 return (-1);
4061 }
4062 #endif /* !INET6 */
4063 break;
4064 #endif /* INET */
4065 #if INET6
4066 case AF_INET6:
4067 inp = in6_pcblookup_hash_exists(pi, &saddr->v6, sport, &daddr->v6,
4068 dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
4069 if (inp == 0) {
4070 inp = in6_pcblookup_hash_exists(pi, &saddr->v6, sport,
4071 &daddr->v6, dport, INPLOOKUP_WILDCARD,
4072 &pd->lookup.uid, &pd->lookup.gid, NULL);
4073 if (inp == 0)
4074 return (-1);
4075 }
4076 break;
4077 #endif /* INET6 */
4078
4079 default:
4080 return (-1);
4081 }
4082
4083 return (1);
4084 }
4085
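/*
 * The AF_INET branch of pf_socket_lookup() above retries the PCB lookup
 * with IPv4-mapped IPv6 addresses so that sockets bound through an
 * AF_INET6 wildcard still match.  A minimal sketch of the mapping it
 * builds (illustrative only, mirroring the code above; v4addr is a
 * placeholder name):
 *
 *	struct in6_addr s6;
 *	memset(&s6, 0, sizeof (s6));
 *	s6.s6_addr16[5] = htons(0xffff);
 *	memcpy(&s6.s6_addr32[3], &v4addr, sizeof (v4addr));
 *
 * s6 is then ::ffff:a.b.c.d for the IPv4 address a.b.c.d.
 */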
4086 static u_int8_t
4087 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
4088 {
4089 int hlen;
4090 u_int8_t hdr[60];
4091 u_int8_t *opt, optlen;
4092 u_int8_t wscale = 0;
4093
4094 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
4095 if (hlen <= (int)sizeof (struct tcphdr))
4096 return (0);
4097 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
4098 return (0);
4099 opt = hdr + sizeof (struct tcphdr);
4100 hlen -= sizeof (struct tcphdr);
4101 while (hlen >= 3) {
4102 switch (*opt) {
4103 case TCPOPT_EOL:
4104 case TCPOPT_NOP:
4105 ++opt;
4106 --hlen;
4107 break;
4108 case TCPOPT_WINDOW:
4109 wscale = opt[2];
4110 if (wscale > TCP_MAX_WINSHIFT)
4111 wscale = TCP_MAX_WINSHIFT;
4112 wscale |= PF_WSCALE_FLAG;
4113 /* FALLTHROUGH */
4114 default:
4115 optlen = opt[1];
4116 if (optlen < 2)
4117 optlen = 2;
4118 hlen -= optlen;
4119 opt += optlen;
4120 break;
4121 }
4122 }
4123 return (wscale);
4124 }
4125
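/*
 * pf_get_wscale() above and pf_get_mss() below share the same TCP option
 * walk: EOL and NOP are single-byte options, every other option carries
 * a length byte at opt[1], clamped to a minimum of 2 so a malformed
 * length can never stall the loop.  The window-scale value itself is
 * capped at TCP_MAX_WINSHIFT and tagged with PF_WSCALE_FLAG so callers
 * can distinguish an explicit scale of zero from a missing option.
 */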
4126 static u_int16_t
4127 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
4128 {
4129 int hlen;
4130 u_int8_t hdr[60];
4131 u_int8_t *opt, optlen;
4132 u_int16_t mss = tcp_mssdflt;
4133
4134 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
4135 if (hlen <= (int)sizeof (struct tcphdr))
4136 return (0);
4137 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
4138 return (0);
4139 opt = hdr + sizeof (struct tcphdr);
4140 hlen -= sizeof (struct tcphdr);
4141 while (hlen >= TCPOLEN_MAXSEG) {
4142 switch (*opt) {
4143 case TCPOPT_EOL:
4144 case TCPOPT_NOP:
4145 ++opt;
4146 --hlen;
4147 break;
4148 case TCPOPT_MAXSEG:
4149 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
4150 #if BYTE_ORDER != BIG_ENDIAN
4151 NTOHS(mss);
4152 #endif
4153 /* FALLTHROUGH */
4154 default:
4155 optlen = opt[1];
4156 if (optlen < 2)
4157 optlen = 2;
4158 hlen -= optlen;
4159 opt += optlen;
4160 break;
4161 }
4162 }
4163 return (mss);
4164 }
4165
4166 static u_int16_t
4167 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
4168 {
4169 #if INET
4170 struct sockaddr_in *dst;
4171 struct route ro;
4172 #endif /* INET */
4173 #if INET6
4174 struct sockaddr_in6 *dst6;
4175 struct route_in6 ro6;
4176 #endif /* INET6 */
4177 struct rtentry *rt = NULL;
4178 int hlen;
4179 u_int16_t mss = tcp_mssdflt;
4180
4181 switch (af) {
4182 #if INET
4183 case AF_INET:
4184 hlen = sizeof (struct ip);
4185 bzero(&ro, sizeof (ro));
4186 dst = (struct sockaddr_in *)&ro.ro_dst;
4187 dst->sin_family = AF_INET;
4188 dst->sin_len = sizeof (*dst);
4189 dst->sin_addr = addr->v4;
4190 rtalloc(&ro);
4191 rt = ro.ro_rt;
4192 break;
4193 #endif /* INET */
4194 #if INET6
4195 case AF_INET6:
4196 hlen = sizeof (struct ip6_hdr);
4197 bzero(&ro6, sizeof (ro6));
4198 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
4199 dst6->sin6_family = AF_INET6;
4200 dst6->sin6_len = sizeof (*dst6);
4201 dst6->sin6_addr = addr->v6;
4202 rtalloc((struct route *)&ro6);
4203 rt = ro6.ro_rt;
4204 break;
4205 #endif /* INET6 */
4206 default:
4207 panic("pf_calc_mss: not AF_INET or AF_INET6!");
4208 return (0);
4209 }
4210
4211 if (rt && rt->rt_ifp) {
4212 mss = rt->rt_ifp->if_mtu - hlen - sizeof (struct tcphdr);
4213 mss = max(tcp_mssdflt, mss);
4214 RTFREE(rt);
4215 }
4216 mss = min(mss, offer);
4217 mss = max(mss, 64); /* sanity - at least max opt space */
4218 return (mss);
4219 }
4220
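/*
 * pf_calc_mss() above derives the value later advertised by the SYN
 * proxy: start from the route's interface MTU, subtract the network and
 * TCP headers, never go below tcp_mssdflt, and never above the peer's
 * offer.  For a plain Ethernet IPv4 path that is the familiar
 * 1500 - 20 - 20 = 1460, clamped by the offered MSS; 64 is kept as an
 * absolute floor to leave room for options.
 */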
4221 static void
4222 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
4223 {
4224 struct pf_rule *r = s->rule.ptr;
4225
4226 s->rt_kif = NULL;
4227 if (!r->rt || r->rt == PF_FASTROUTE)
4228 return;
4229 switch (s->state_key->af) {
4230 #if INET
4231 case AF_INET:
4232 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
4233 &s->nat_src_node);
4234 s->rt_kif = r->rpool.cur->kif;
4235 break;
4236 #endif /* INET */
4237 #if INET6
4238 case AF_INET6:
4239 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
4240 &s->nat_src_node);
4241 s->rt_kif = r->rpool.cur->kif;
4242 break;
4243 #endif /* INET6 */
4244 }
4245 }
4246
4247 static void
4248 pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
4249 {
4250 s->state_key = sk;
4251 sk->refcnt++;
4252
4253 /* list is sorted, if-bound states before floating */
4254 if (tail)
4255 TAILQ_INSERT_TAIL(&sk->states, s, next);
4256 else
4257 TAILQ_INSERT_HEAD(&sk->states, s, next);
4258 }
4259
4260 static void
4261 pf_detach_state(struct pf_state *s, int flags)
4262 {
4263 struct pf_state_key *sk = s->state_key;
4264
4265 if (sk == NULL)
4266 return;
4267
4268 s->state_key = NULL;
4269 TAILQ_REMOVE(&sk->states, s, next);
4270 if (--sk->refcnt == 0) {
4271 if (!(flags & PF_DT_SKIP_EXTGWY))
4272 RB_REMOVE(pf_state_tree_ext_gwy,
4273 &pf_statetbl_ext_gwy, sk);
4274 if (!(flags & PF_DT_SKIP_LANEXT))
4275 RB_REMOVE(pf_state_tree_lan_ext,
4276 &pf_statetbl_lan_ext, sk);
4277 #ifndef NO_APPLE_EXTENSIONS
4278 if (sk->app_state)
4279 pool_put(&pf_app_state_pl, sk->app_state);
4280 #endif
4281 pool_put(&pf_state_key_pl, sk);
4282 }
4283 }
4284
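/*
 * State keys are reference counted: pf_attach_state() takes a reference
 * and queues the state on sk->states (if-bound states ahead of floating
 * ones), pf_detach_state() drops it, and only the final detach removes
 * the key from the lan_ext and ext_gwy lookup trees and returns it
 * (plus any app_state) to its pool.  The PF_DT_SKIP_* flags let a caller
 * skip removal from a tree it has already handled.
 */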
4285 struct pf_state_key *
4286 pf_alloc_state_key(struct pf_state *s)
4287 {
4288 struct pf_state_key *sk;
4289
4290 if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL)
4291 return (NULL);
4292 bzero(sk, sizeof (*sk));
4293 TAILQ_INIT(&sk->states);
4294 pf_attach_state(sk, s, 0);
4295
4296 return (sk);
4297 }
4298
4299 static u_int32_t
4300 pf_tcp_iss(struct pf_pdesc *pd)
4301 {
4302 MD5_CTX ctx;
4303 u_int32_t digest[4];
4304
4305 if (pf_tcp_secret_init == 0) {
4306 read_random(pf_tcp_secret, sizeof (pf_tcp_secret));
4307 MD5Init(&pf_tcp_secret_ctx);
4308 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
4309 sizeof (pf_tcp_secret));
4310 pf_tcp_secret_init = 1;
4311 }
4312 ctx = pf_tcp_secret_ctx;
4313
4314 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
4315 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
4316 if (pd->af == AF_INET6) {
4317 MD5Update(&ctx, (char *)&pd->src->v6, sizeof (struct in6_addr));
4318 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof (struct in6_addr));
4319 } else {
4320 MD5Update(&ctx, (char *)&pd->src->v4, sizeof (struct in_addr));
4321 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof (struct in_addr));
4322 }
4323 MD5Final((u_char *)digest, &ctx);
4324 pf_tcp_iss_off += 4096;
4325 return (digest[0] + random() + pf_tcp_iss_off);
4326 }
4327
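/*
 * pf_tcp_iss() above follows the familiar RFC 1948 style of ISN
 * generation: hash a lazily initialized random secret together with the
 * connection's ports and addresses, then add a random term and a
 * monotonically advancing offset (pf_tcp_iss_off grows by 4096 per
 * call), so modulated sequence numbers are hard to predict yet unlikely
 * to collide with a previous incarnation of the same connection.
 */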
4328 static int
4329 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
4330 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
4331 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
4332 struct ifqueue *ifq)
4333 {
4334 #pragma unused(h)
4335 struct pf_rule *nr = NULL;
4336 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4337 #ifdef NO_APPLE_EXTENSIONS
4338 u_int16_t bport, nport = 0;
4339 #endif
4340 sa_family_t af = pd->af;
4341 struct pf_rule *r, *a = NULL;
4342 struct pf_ruleset *ruleset = NULL;
4343 struct pf_src_node *nsn = NULL;
4344 struct tcphdr *th = pd->hdr.tcp;
4345 u_short reason;
4346 int rewrite = 0, hdrlen = 0;
4347 int tag = -1;
4348 unsigned int rtableid = IFSCOPE_NONE;
4349 int asd = 0;
4350 int match = 0;
4351 int state_icmp = 0;
4352 u_int16_t mss = tcp_mssdflt;
4353 #ifdef NO_APPLE_EXTENSIONS
4354 u_int16_t sport, dport;
4355 #endif
4356 u_int8_t icmptype = 0, icmpcode = 0;
4357
4358 #ifndef NO_APPLE_EXTENSIONS
4359 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
4360 union pf_state_xport bxport, nxport, sxport, dxport;
4361 #endif
4362
4363 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
4364
4365 if (direction == PF_IN && pf_check_congestion(ifq)) {
4366 REASON_SET(&reason, PFRES_CONGEST);
4367 return (PF_DROP);
4368 }
4369
4370 #ifndef NO_APPLE_EXTENSIONS
4371 hdrlen = 0;
4372 sxport.spi = 0;
4373 dxport.spi = 0;
4374 nxport.spi = 0;
4375 #else
4376 sport = dport = hdrlen = 0;
4377 #endif
4378
4379 switch (pd->proto) {
4380 case IPPROTO_TCP:
4381 #ifndef NO_APPLE_EXTENSIONS
4382 sxport.port = th->th_sport;
4383 dxport.port = th->th_dport;
4384 #else
4385 sport = th->th_sport;
4386 dport = th->th_dport;
4387 #endif
4388 hdrlen = sizeof (*th);
4389 break;
4390 case IPPROTO_UDP:
4391 #ifndef NO_APPLE_EXTENSIONS
4392 sxport.port = pd->hdr.udp->uh_sport;
4393 dxport.port = pd->hdr.udp->uh_dport;
4394 #else
4395 sport = pd->hdr.udp->uh_sport;
4396 dport = pd->hdr.udp->uh_dport;
4397 #endif
4398 hdrlen = sizeof (*pd->hdr.udp);
4399 break;
4400 #if INET
4401 case IPPROTO_ICMP:
4402 if (pd->af != AF_INET)
4403 break;
4404 #ifndef NO_APPLE_EXTENSIONS
4405 sxport.port = dxport.port = pd->hdr.icmp->icmp_id;
4406 hdrlen = ICMP_MINLEN;
4407 #else
4408 sport = dport = pd->hdr.icmp->icmp_id;
4409 #endif
4410 icmptype = pd->hdr.icmp->icmp_type;
4411 icmpcode = pd->hdr.icmp->icmp_code;
4412
4413 if (icmptype == ICMP_UNREACH ||
4414 icmptype == ICMP_SOURCEQUENCH ||
4415 icmptype == ICMP_REDIRECT ||
4416 icmptype == ICMP_TIMXCEED ||
4417 icmptype == ICMP_PARAMPROB)
4418 state_icmp++;
4419 break;
4420 #endif /* INET */
4421 #if INET6
4422 case IPPROTO_ICMPV6:
4423 if (pd->af != AF_INET6)
4424 break;
4425 #ifndef NO_APPLE_EXTENSIONS
4426 sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id;
4427 #else
4428 sport = dport = pd->hdr.icmp6->icmp6_id;
4429 #endif
4430 hdrlen = sizeof (*pd->hdr.icmp6);
4431 icmptype = pd->hdr.icmp6->icmp6_type;
4432 icmpcode = pd->hdr.icmp6->icmp6_code;
4433
4434 if (icmptype == ICMP6_DST_UNREACH ||
4435 icmptype == ICMP6_PACKET_TOO_BIG ||
4436 icmptype == ICMP6_TIME_EXCEEDED ||
4437 icmptype == ICMP6_PARAM_PROB)
4438 state_icmp++;
4439 break;
4440 #endif /* INET6 */
4441 #ifndef NO_APPLE_EXTENSIONS
4442 case IPPROTO_GRE:
4443 if (pd->proto_variant == PF_GRE_PPTP_VARIANT) {
4444 sxport.call_id = dxport.call_id =
4445 pd->hdr.grev1->call_id;
4446 hdrlen = sizeof (*pd->hdr.grev1);
4447 }
4448 break;
4449 case IPPROTO_ESP:
4450 sxport.spi = 0;
4451 dxport.spi = pd->hdr.esp->spi;
4452 hdrlen = sizeof (*pd->hdr.esp);
4453 break;
4454 #endif
4455 }
4456
4457 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4458
4459 if (direction == PF_OUT) {
4460 #ifndef NO_APPLE_EXTENSIONS
4461 bxport = nxport = sxport;
4462 /* check outgoing packet for BINAT/NAT */
4463 if ((nr = pf_get_translation_aux(pd, m, off, PF_OUT, kif, &nsn,
4464 saddr, &sxport, daddr, &dxport, &pd->naddr, &nxport)) !=
4465 NULL) {
4466 #else
4467 bport = nport = sport;
4468 /* check outgoing packet for BINAT/NAT */
4469 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
4470 saddr, sport, daddr, dport, &pd->naddr, &nport)) != NULL) {
4471 #endif
4472 PF_ACPY(&pd->baddr, saddr, af);
4473 switch (pd->proto) {
4474 case IPPROTO_TCP:
4475 #ifndef NO_APPLE_EXTENSIONS
4476 pf_change_ap(direction, pd->mp, saddr,
4477 &th->th_sport, pd->ip_sum, &th->th_sum,
4478 &pd->naddr, nxport.port, 0, af);
4479 sxport.port = th->th_sport;
4480 #else
4481 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
4482 &th->th_sum, &pd->naddr, nport, 0, af);
4483 sport = th->th_sport;
4484 #endif
4485 rewrite++;
4486 break;
4487 case IPPROTO_UDP:
4488 #ifndef NO_APPLE_EXTENSIONS
4489 pf_change_ap(direction, pd->mp, saddr,
4490 &pd->hdr.udp->uh_sport, pd->ip_sum,
4491 &pd->hdr.udp->uh_sum, &pd->naddr,
4492 nxport.port, 1, af);
4493 sxport.port = pd->hdr.udp->uh_sport;
4494 #else
4495 pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
4496 pd->ip_sum, &pd->hdr.udp->uh_sum,
4497 &pd->naddr, nport, 1, af);
4498 sport = pd->hdr.udp->uh_sport;
4499 #endif
4500 rewrite++;
4501 break;
4502 #if INET
4503 case IPPROTO_ICMP:
4504 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
4505 pd->naddr.v4.s_addr, 0);
4506 #ifndef NO_APPLE_EXTENSIONS
4507 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
4508 pd->hdr.icmp->icmp_cksum, sxport.port,
4509 nxport.port, 0);
4510 pd->hdr.icmp->icmp_id = nxport.port;
4511 ++rewrite;
4512 #else
4513 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
4514 pd->hdr.icmp->icmp_cksum, sport, nport, 0);
4515 pd->hdr.icmp->icmp_id = nport;
4516 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
4517 #endif
4518 break;
4519 #endif /* INET */
4520 #if INET6
4521 case IPPROTO_ICMPV6:
4522 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
4523 &pd->naddr, 0);
4524 rewrite++;
4525 break;
4526 #endif /* INET6 */
4527 #ifndef NO_APPLE_EXTENSIONS
4528 case IPPROTO_GRE:
4529 switch (af) {
4530 #if INET
4531 case AF_INET:
4532 pf_change_a(&saddr->v4.s_addr,
4533 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4534 break;
4535 #endif /* INET */
4536 #if INET6
4537 case AF_INET6:
4538 PF_ACPY(saddr, &pd->naddr, AF_INET6);
4539 break;
4540 #endif /* INET6 */
4541 }
4542 ++rewrite;
4543 break;
4544 case IPPROTO_ESP:
4545 bxport.spi = 0;
4546 switch (af) {
4547 #if INET
4548 case AF_INET:
4549 pf_change_a(&saddr->v4.s_addr,
4550 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4551 break;
4552 #endif /* INET */
4553 #if INET6
4554 case AF_INET6:
4555 PF_ACPY(saddr, &pd->naddr, AF_INET6);
4556 break;
4557 #endif /* INET6 */
4558 }
4559 break;
4560 #endif
4561 default:
4562 switch (af) {
4563 #if INET
4564 case AF_INET:
4565 pf_change_a(&saddr->v4.s_addr,
4566 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4567 break;
4568 #endif /* INET */
4569 #if INET6
4570 case AF_INET6:
4571 PF_ACPY(saddr, &pd->naddr, af);
4572 break;
4573 #endif /* INET6 */
4574 }
4575 break;
4576 }
4577
4578 if (nr->natpass)
4579 r = NULL;
4580 pd->nat_rule = nr;
4581 }
4582 } else {
4583 #ifndef NO_APPLE_EXTENSIONS
4584 bxport.port = nxport.port = dxport.port;
4585 /* check incoming packet for BINAT/RDR */
4586 if ((nr = pf_get_translation_aux(pd, m, off, PF_IN, kif, &nsn,
4587 saddr, &sxport, daddr, &dxport, &pd->naddr, &nxport)) !=
4588 NULL) {
4589 #else
4590 bport = nport = dport;
4591 /* check incoming packet for BINAT/RDR */
4592 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
4593 saddr, sport, daddr, dport, &pd->naddr, &nport)) != NULL) {
4594 #endif
4595 PF_ACPY(&pd->baddr, daddr, af);
4596 switch (pd->proto) {
4597 case IPPROTO_TCP:
4598 #ifndef NO_APPLE_EXTENSIONS
4599 pf_change_ap(direction, pd->mp, daddr,
4600 &th->th_dport, pd->ip_sum, &th->th_sum,
4601 &pd->naddr, nxport.port, 0, af);
4602 dxport.port = th->th_dport;
4603 #else
4604 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
4605 &th->th_sum, &pd->naddr, nport, 0, af);
4606 dport = th->th_dport;
4607 #endif
4608 rewrite++;
4609 break;
4610 case IPPROTO_UDP:
4611 #ifndef NO_APPLE_EXTENSIONS
4612 pf_change_ap(direction, pd->mp, daddr,
4613 &pd->hdr.udp->uh_dport, pd->ip_sum,
4614 &pd->hdr.udp->uh_sum, &pd->naddr,
4615 nxport.port, 1, af);
4616 dxport.port = pd->hdr.udp->uh_dport;
4617 #else
4618 pf_change_ap(direction, daddr,
4619 &pd->hdr.udp->uh_dport,
4620 pd->ip_sum, &pd->hdr.udp->uh_sum,
4621 &pd->naddr, nport, 1, af);
4622 dport = pd->hdr.udp->uh_dport;
4623 #endif
4624 rewrite++;
4625 break;
4626 #if INET
4627 case IPPROTO_ICMP:
4628 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
4629 pd->naddr.v4.s_addr, 0);
4630 break;
4631 #endif /* INET */
4632 #if INET6
4633 case IPPROTO_ICMPV6:
4634 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
4635 &pd->naddr, 0);
4636 rewrite++;
4637 break;
4638 #endif /* INET6 */
4639 #ifndef NO_APPLE_EXTENSIONS
4640 case IPPROTO_GRE:
4641 if (pd->proto_variant == PF_GRE_PPTP_VARIANT)
4642 grev1->call_id = nxport.call_id;
4643
4644 switch (af) {
4645 #if INET
4646 case AF_INET:
4647 pf_change_a(&daddr->v4.s_addr,
4648 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4649 break;
4650 #endif /* INET */
4651 #if INET6
4652 case AF_INET6:
4653 PF_ACPY(daddr, &pd->naddr, AF_INET6);
4654 break;
4655 #endif /* INET6 */
4656 }
4657 ++rewrite;
4658 break;
4659 case IPPROTO_ESP:
4660 switch (af) {
4661 #if INET
4662 case AF_INET:
4663 pf_change_a(&daddr->v4.s_addr,
4664 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4665 break;
4666 #endif /* INET */
4667 #if INET6
4668 case AF_INET6:
4669 PF_ACPY(daddr, &pd->naddr, AF_INET6);
4670 break;
4671 #endif /* INET6 */
4672 }
4673 break;
4674 #endif
4675 default:
4676 switch (af) {
4677 #if INET
4678 case AF_INET:
4679 pf_change_a(&daddr->v4.s_addr,
4680 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4681 break;
4682 #endif /* INET */
4683 #if INET6
4684 case AF_INET6:
4685 PF_ACPY(daddr, &pd->naddr, af);
4686 break;
4687 #endif /* INET6 */
4688 }
4689 break;
4690 }
4691
4692 if (nr->natpass)
4693 r = NULL;
4694 pd->nat_rule = nr;
4695 }
4696 }
4697
4698 #ifndef NO_APPLE_EXTENSIONS
4699 if (nr && nr->tag > 0)
4700 tag = nr->tag;
4701 #endif
4702
4703 while (r != NULL) {
4704 r->evaluations++;
4705 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4706 r = r->skip[PF_SKIP_IFP].ptr;
4707 else if (r->direction && r->direction != direction)
4708 r = r->skip[PF_SKIP_DIR].ptr;
4709 else if (r->af && r->af != af)
4710 r = r->skip[PF_SKIP_AF].ptr;
4711 else if (r->proto && r->proto != pd->proto)
4712 r = r->skip[PF_SKIP_PROTO].ptr;
4713 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
4714 r->src.neg, kif))
4715 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4716 /* tcp/udp only. port_op always 0 in other cases */
4717 #ifndef NO_APPLE_EXTENSIONS
4718 else if (r->proto == pd->proto &&
4719 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
4720 r->src.xport.range.op &&
4721 !pf_match_port(r->src.xport.range.op,
4722 r->src.xport.range.port[0], r->src.xport.range.port[1],
4723 th->th_sport))
4724 #else
4725 else if (r->src.port_op && !pf_match_port(r->src.port_op,
4726 r->src.port[0], r->src.port[1], th->th_sport))
4727 #endif
4728 r = r->skip[PF_SKIP_SRC_PORT].ptr;
4729 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
4730 r->dst.neg, NULL))
4731 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4732 /* tcp/udp only. port_op always 0 in other cases */
4733 #ifndef NO_APPLE_EXTENSIONS
4734 else if (r->proto == pd->proto &&
4735 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
4736 r->dst.xport.range.op &&
4737 !pf_match_port(r->dst.xport.range.op,
4738 r->dst.xport.range.port[0], r->dst.xport.range.port[1],
4739 th->th_dport))
4740 #else
4741 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
4742 r->dst.port[0], r->dst.port[1], th->th_dport))
4743 #endif
4744 r = r->skip[PF_SKIP_DST_PORT].ptr;
4745 /* icmp only. type always 0 in other cases */
4746 else if (r->type && r->type != icmptype + 1)
4747 r = TAILQ_NEXT(r, entries);
4748 /* icmp only. code always 0 in other cases */
4749 else if (r->code && r->code != icmpcode + 1)
4750 r = TAILQ_NEXT(r, entries);
4751 else if (r->tos && !(r->tos == pd->tos))
4752 r = TAILQ_NEXT(r, entries);
4753 else if (r->rule_flag & PFRULE_FRAGMENT)
4754 r = TAILQ_NEXT(r, entries);
4755 else if (pd->proto == IPPROTO_TCP &&
4756 (r->flagset & th->th_flags) != r->flags)
4757 r = TAILQ_NEXT(r, entries);
4758 /* tcp/udp only. uid.op always 0 in other cases */
4759 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
4760 pf_socket_lookup(direction, pd), 1)) &&
4761 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
4762 pd->lookup.uid))
4763 r = TAILQ_NEXT(r, entries);
4764 /* tcp/udp only. gid.op always 0 in other cases */
4765 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
4766 pf_socket_lookup(direction, pd), 1)) &&
4767 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
4768 pd->lookup.gid))
4769 r = TAILQ_NEXT(r, entries);
4770 else if (r->prob && r->prob <= (random() % (UINT_MAX - 1) + 1))
4771 r = TAILQ_NEXT(r, entries);
4772 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
4773 r = TAILQ_NEXT(r, entries);
4774 else if (r->os_fingerprint != PF_OSFP_ANY &&
4775 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
4776 pf_osfp_fingerprint(pd, m, off, th),
4777 r->os_fingerprint)))
4778 r = TAILQ_NEXT(r, entries);
4779 else {
4780 if (r->tag)
4781 tag = r->tag;
4782 if (PF_RTABLEID_IS_VALID(r->rtableid))
4783 rtableid = r->rtableid;
4784 if (r->anchor == NULL) {
4785 match = 1;
4786 *rm = r;
4787 *am = a;
4788 *rsm = ruleset;
4789 if ((*rm)->quick)
4790 break;
4791 r = TAILQ_NEXT(r, entries);
4792 } else
4793 pf_step_into_anchor(&asd, &ruleset,
4794 PF_RULESET_FILTER, &r, &a, &match);
4795 }
4796 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4797 PF_RULESET_FILTER, &r, &a, &match))
4798 break;
4799 }
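/*
 * Each mismatch above advances through r->skip[PF_SKIP_*] where such a
 * step exists: skip steps, computed when the ruleset is loaded, point
 * every rule at the next rule that differs in that field, so long runs
 * of rules sharing the same interface, direction, address family,
 * protocol, address or port are stepped over in one hop.  Criteria
 * without skip steps (TOS, TCP flags, uid/gid, probability, tags, OS
 * fingerprint) fall back to TAILQ_NEXT.
 */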
4800 r = *rm;
4801 a = *am;
4802 ruleset = *rsm;
4803
4804 REASON_SET(&reason, PFRES_MATCH);
4805
4806 if (r->log || (nr != NULL && nr->log)) {
4807 #ifndef NO_APPLE_EXTENSIONS
4808 if (rewrite > 0) {
4809 if (rewrite < off + hdrlen)
4810 rewrite = off + hdrlen;
4811
4812 m = pf_lazy_makewritable(pd, m, rewrite);
4813 if (!m) {
4814 REASON_SET(&reason, PFRES_MEMORY);
4815 return (PF_DROP);
4816 }
4817
4818 m_copyback(m, off, hdrlen, pd->hdr.any);
4819 }
4820 #else
4821 if (rewrite)
4822 m_copyback(m, off, hdrlen, pd->hdr.any);
4823 #endif
4824 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
4825 a, ruleset, pd);
4826 }
4827
4828 if ((r->action == PF_DROP) &&
4829 ((r->rule_flag & PFRULE_RETURNRST) ||
4830 (r->rule_flag & PFRULE_RETURNICMP) ||
4831 (r->rule_flag & PFRULE_RETURN))) {
4832 /* undo NAT changes, if they have taken place */
4833 if (nr != NULL) {
4834 if (direction == PF_OUT) {
4835 switch (pd->proto) {
4836 case IPPROTO_TCP:
4837 #ifndef NO_APPLE_EXTENSIONS
4838 pf_change_ap(direction, pd->mp, saddr,
4839 &th->th_sport, pd->ip_sum,
4840 &th->th_sum, &pd->baddr,
4841 bxport.port, 0, af);
4842 sxport.port = th->th_sport;
4843 #else
4844 pf_change_ap(saddr, &th->th_sport,
4845 pd->ip_sum, &th->th_sum,
4846 &pd->baddr, bport, 0, af);
4847 sport = th->th_sport;
4848 #endif
4849 rewrite++;
4850 break;
4851 case IPPROTO_UDP:
4852 #ifndef NO_APPLE_EXTENSIONS
4853 pf_change_ap(direction, pd->mp, saddr,
4854 &pd->hdr.udp->uh_sport, pd->ip_sum,
4855 &pd->hdr.udp->uh_sum, &pd->baddr,
4856 bxport.port, 1, af);
4857 sxport.port = pd->hdr.udp->uh_sport;
4858 #else
4859 pf_change_ap(saddr,
4860 &pd->hdr.udp->uh_sport, pd->ip_sum,
4861 &pd->hdr.udp->uh_sum, &pd->baddr,
4862 bport, 1, af);
4863 sport = pd->hdr.udp->uh_sport;
4864 #endif
4865 rewrite++;
4866 break;
4867 case IPPROTO_ICMP:
4868 #if INET6
4869 case IPPROTO_ICMPV6:
4870 #endif
4871 /* nothing! */
4872 break;
4873 #ifndef NO_APPLE_EXTENSIONS
4874 case IPPROTO_GRE:
4875 PF_ACPY(&pd->baddr, saddr, af);
4876 ++rewrite;
4877 switch (af) {
4878 #if INET
4879 case AF_INET:
4880 pf_change_a(&saddr->v4.s_addr,
4881 pd->ip_sum,
4882 pd->baddr.v4.s_addr, 0);
4883 break;
4884 #endif /* INET */
4885 #if INET6
4886 case AF_INET6:
4887 PF_ACPY(saddr, &pd->baddr,
4888 AF_INET6);
4889 break;
4890 #endif /* INET6 */
4891 }
4892 break;
4893 case IPPROTO_ESP:
4894 PF_ACPY(&pd->baddr, saddr, af);
4895 switch (af) {
4896 #if INET
4897 case AF_INET:
4898 pf_change_a(&saddr->v4.s_addr,
4899 pd->ip_sum,
4900 pd->baddr.v4.s_addr, 0);
4901 break;
4902 #endif /* INET */
4903 #if INET6
4904 case AF_INET6:
4905 PF_ACPY(saddr, &pd->baddr,
4906 AF_INET6);
4907 break;
4908 #endif /* INET6 */
4909 }
4910 break;
4911 #endif
4912 default:
4913 switch (af) {
4914 case AF_INET:
4915 pf_change_a(&saddr->v4.s_addr,
4916 pd->ip_sum,
4917 pd->baddr.v4.s_addr, 0);
4918 break;
4919 case AF_INET6:
4920 PF_ACPY(saddr, &pd->baddr, af);
4921 break;
4922 }
4923 }
4924 } else {
4925 switch (pd->proto) {
4926 case IPPROTO_TCP:
4927 #ifndef NO_APPLE_EXTENSIONS
4928 pf_change_ap(direction, pd->mp, daddr,
4929 &th->th_dport, pd->ip_sum,
4930 &th->th_sum, &pd->baddr,
4931 bxport.port, 0, af);
4932 dxport.port = th->th_dport;
4933 #else
4934 pf_change_ap(daddr, &th->th_dport,
4935 pd->ip_sum, &th->th_sum,
4936 &pd->baddr, bport, 0, af);
4937 dport = th->th_dport;
4938 #endif
4939 rewrite++;
4940 break;
4941 case IPPROTO_UDP:
4942 #ifndef NO_APPLE_EXTENSIONS
4943 pf_change_ap(direction, pd->mp, daddr,
4944 &pd->hdr.udp->uh_dport, pd->ip_sum,
4945 &pd->hdr.udp->uh_sum, &pd->baddr,
4946 bxport.port, 1, af);
4947 dxport.port = pd->hdr.udp->uh_dport;
4948 #else
4949 pf_change_ap(daddr,
4950 &pd->hdr.udp->uh_dport, pd->ip_sum,
4951 &pd->hdr.udp->uh_sum, &pd->baddr,
4952 bport, 1, af);
4953 dport = pd->hdr.udp->uh_dport;
4954 #endif
4955 rewrite++;
4956 break;
4957 case IPPROTO_ICMP:
4958 #if INET6
4959 case IPPROTO_ICMPV6:
4960 #endif
4961 /* nothing! */
4962 break;
4963 #ifndef NO_APPLE_EXTENSIONS
4964 case IPPROTO_GRE:
4965 if (pd->proto_variant ==
4966 PF_GRE_PPTP_VARIANT)
4967 grev1->call_id = bxport.call_id;
4968 ++rewrite;
4969 switch (af) {
4970 #if INET
4971 case AF_INET:
4972 pf_change_a(&daddr->v4.s_addr,
4973 pd->ip_sum,
4974 pd->baddr.v4.s_addr, 0);
4975 break;
4976 #endif /* INET */
4977 #if INET6
4978 case AF_INET6:
4979 PF_ACPY(daddr, &pd->baddr,
4980 AF_INET6);
4981 break;
4982 #endif /* INET6 */
4983 }
4984 break;
4985 case IPPROTO_ESP:
4986 switch (af) {
4987 #if INET
4988 case AF_INET:
4989 pf_change_a(&daddr->v4.s_addr,
4990 pd->ip_sum,
4991 pd->baddr.v4.s_addr, 0);
4992 break;
4993 #endif /* INET */
4994 #if INET6
4995 case AF_INET6:
4996 PF_ACPY(daddr, &pd->baddr,
4997 AF_INET6);
4998 break;
4999 #endif /* INET6 */
5000 }
5001 break;
5002 #endif
5003 default:
5004 switch (af) {
5005 case AF_INET:
5006 pf_change_a(&daddr->v4.s_addr,
5007 pd->ip_sum,
5008 pd->baddr.v4.s_addr, 0);
5009 break;
5010 #if INET6
5011 case AF_INET6:
5012 PF_ACPY(daddr, &pd->baddr, af);
5013 break;
5014 #endif /* INET6 */
5015 }
5016 }
5017 }
5018 }
5019 if (pd->proto == IPPROTO_TCP &&
5020 ((r->rule_flag & PFRULE_RETURNRST) ||
5021 (r->rule_flag & PFRULE_RETURN)) &&
5022 !(th->th_flags & TH_RST)) {
5023 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
5024 int len = 0;
5025 struct ip *h4;
5026 #if INET6
5027 struct ip6_hdr *h6;
5028 #endif /* INET6 */
5029
5030 switch (af) {
5031 case AF_INET:
5032 h4 = mtod(m, struct ip *);
5033 len = ntohs(h4->ip_len) - off;
5034 break;
5035 #if INET6
5036 case AF_INET6:
5037 h6 = mtod(m, struct ip6_hdr *);
5038 len = ntohs(h6->ip6_plen) -
5039 (off - sizeof (*h6));
5040 break;
5041 #endif /* INET6 */
5042 }
5043
5044 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
5045 REASON_SET(&reason, PFRES_PROTCKSUM);
5046 else {
5047 if (th->th_flags & TH_SYN)
5048 ack++;
5049 if (th->th_flags & TH_FIN)
5050 ack++;
5051 pf_send_tcp(r, af, pd->dst,
5052 pd->src, th->th_dport, th->th_sport,
5053 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
5054 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
5055 }
5056 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
5057 #ifndef NO_APPLE_EXTENSIONS
5058 pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
5059 #endif
5060 r->return_icmp)
5061 pf_send_icmp(m, r->return_icmp >> 8,
5062 r->return_icmp & 255, af, r);
5063 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
5064 #ifndef NO_APPLE_EXTENSIONS
5065 pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
5066 #endif
5067 r->return_icmp6)
5068 pf_send_icmp(m, r->return_icmp6 >> 8,
5069 r->return_icmp6 & 255, af, r);
5070 }
5071
5072 if (r->action == PF_DROP)
5073 return (PF_DROP);
5074
5075 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
5076 REASON_SET(&reason, PFRES_MEMORY);
5077 return (PF_DROP);
5078 }
5079
5080 if (!state_icmp && (r->keep_state || nr != NULL ||
5081 (pd->flags & PFDESC_TCP_NORM))) {
5082 /* create new state */
5083 struct pf_state *s = NULL;
5084 struct pf_state_key *sk = NULL;
5085 struct pf_src_node *sn = NULL;
5086 #ifndef NO_APPLE_EXTENSIONS
5087 struct pf_ike_hdr ike;
5088
5089 if (pd->proto == IPPROTO_UDP) {
5090 struct udphdr *uh = pd->hdr.udp;
5091 size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
5092
5093 if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
5094 ntohs(uh->uh_dport) == PF_IKE_PORT &&
5095 plen >= PF_IKE_PACKET_MINSIZE) {
5096 if (plen > PF_IKE_PACKET_MINSIZE)
5097 plen = PF_IKE_PACKET_MINSIZE;
5098 m_copydata(m, off + sizeof (*uh), plen, &ike);
5099 }
5100 }
5101
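/*
 * The IKE peek above applies to UDP packets whose source and destination
 * ports both equal PF_IKE_PORT: the first bytes of the payload are
 * copied out so the initiator cookie can be stored in the state's
 * app_state further down, where pf_ike_compare() later uses it to keep
 * concurrent IKE exchanges between the same address pair on separate
 * states even though they share one UDP port pair.
 */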
5102 if (nr != NULL && pd->proto == IPPROTO_ESP &&
5103 direction == PF_OUT) {
5104 struct pf_state_key_cmp sk0;
5105 struct pf_state *s0;
5106
5107 /*
5108 * <jhw@apple.com>
5109 * This squelches state creation if the external
5110 * address matches an existing incomplete state with a
5111 * different internal address. Only one 'blocking'
5112 * partial state is allowed for each external address.
5113 */
5114 memset(&sk0, 0, sizeof (sk0));
5115 sk0.af = pd->af;
5116 sk0.proto = IPPROTO_ESP;
5117 PF_ACPY(&sk0.gwy.addr, saddr, sk0.af);
5118 PF_ACPY(&sk0.ext.addr, daddr, sk0.af);
5119 s0 = pf_find_state(kif, &sk0, PF_IN);
5120
5121 if (s0 && PF_ANEQ(&s0->state_key->lan.addr,
5122 pd->src, pd->af)) {
5123 nsn = 0;
5124 goto cleanup;
5125 }
5126 }
5127 #endif
5128
5129 /* check maximums */
5130 if (r->max_states && (r->states >= r->max_states)) {
5131 pf_status.lcounters[LCNT_STATES]++;
5132 REASON_SET(&reason, PFRES_MAXSTATES);
5133 goto cleanup;
5134 }
5135 /* src node for filter rule */
5136 if ((r->rule_flag & PFRULE_SRCTRACK ||
5137 r->rpool.opts & PF_POOL_STICKYADDR) &&
5138 pf_insert_src_node(&sn, r, saddr, af) != 0) {
5139 REASON_SET(&reason, PFRES_SRCLIMIT);
5140 goto cleanup;
5141 }
5142 /* src node for translation rule */
5143 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
5144 ((direction == PF_OUT &&
5145 #ifndef NO_APPLE_EXTENSIONS
5146 nr->action != PF_RDR &&
5147 #endif
5148 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
5149 (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
5150 REASON_SET(&reason, PFRES_SRCLIMIT);
5151 goto cleanup;
5152 }
5153 s = pool_get(&pf_state_pl, PR_WAITOK);
5154 if (s == NULL) {
5155 REASON_SET(&reason, PFRES_MEMORY);
5156 cleanup:
5157 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
5158 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
5159 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
5160 pf_status.src_nodes--;
5161 pool_put(&pf_src_tree_pl, sn);
5162 }
5163 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
5164 nsn->expire == 0) {
5165 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
5166 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
5167 pf_status.src_nodes--;
5168 pool_put(&pf_src_tree_pl, nsn);
5169 }
5170 if (sk != NULL) {
5171 #ifndef NO_APPLE_EXTENSIONS
5172 if (sk->app_state)
5173 pool_put(&pf_app_state_pl,
5174 sk->app_state);
5175 #endif
5176 pool_put(&pf_state_key_pl, sk);
5177 }
5178 return (PF_DROP);
5179 }
5180 bzero(s, sizeof (*s));
5181 #ifndef NO_APPLE_EXTENSIONS
5182 TAILQ_INIT(&s->unlink_hooks);
5183 #endif
5184 s->rule.ptr = r;
5185 s->nat_rule.ptr = nr;
5186 s->anchor.ptr = a;
5187 STATE_INC_COUNTERS(s);
5188 s->allow_opts = r->allow_opts;
5189 s->log = r->log & PF_LOG_ALL;
5190 if (nr != NULL)
5191 s->log |= nr->log & PF_LOG_ALL;
5192 switch (pd->proto) {
5193 case IPPROTO_TCP:
5194 s->src.seqlo = ntohl(th->th_seq);
5195 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
5196 if ((th->th_flags & (TH_SYN|TH_ACK)) ==
5197 TH_SYN && r->keep_state == PF_STATE_MODULATE) {
5198 /* Generate sequence number modulator */
5199 if ((s->src.seqdiff = pf_tcp_iss(pd) -
5200 s->src.seqlo) == 0)
5201 s->src.seqdiff = 1;
5202 pf_change_a(&th->th_seq, &th->th_sum,
5203 htonl(s->src.seqlo + s->src.seqdiff), 0);
5204 rewrite = off + sizeof (*th);
5205 } else
5206 s->src.seqdiff = 0;
5207 if (th->th_flags & TH_SYN) {
5208 s->src.seqhi++;
5209 s->src.wscale = pf_get_wscale(m, off,
5210 th->th_off, af);
5211 }
5212 s->src.max_win = MAX(ntohs(th->th_win), 1);
5213 if (s->src.wscale & PF_WSCALE_MASK) {
5214 /* Remove scale factor from initial window */
5215 int win = s->src.max_win;
5216 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
5217 s->src.max_win = (win - 1) >>
5218 (s->src.wscale & PF_WSCALE_MASK);
5219 }
5220 if (th->th_flags & TH_FIN)
5221 s->src.seqhi++;
5222 s->dst.seqhi = 1;
5223 s->dst.max_win = 1;
5224 s->src.state = TCPS_SYN_SENT;
5225 s->dst.state = TCPS_CLOSED;
5226 s->timeout = PFTM_TCP_FIRST_PACKET;
5227 break;
5228 case IPPROTO_UDP:
5229 s->src.state = PFUDPS_SINGLE;
5230 s->dst.state = PFUDPS_NO_TRAFFIC;
5231 s->timeout = PFTM_UDP_FIRST_PACKET;
5232 break;
5233 case IPPROTO_ICMP:
5234 #if INET6
5235 case IPPROTO_ICMPV6:
5236 #endif
5237 s->timeout = PFTM_ICMP_FIRST_PACKET;
5238 break;
5239 #ifndef NO_APPLE_EXTENSIONS
5240 case IPPROTO_GRE:
5241 s->src.state = PFGRE1S_INITIATING;
5242 s->dst.state = PFGRE1S_NO_TRAFFIC;
5243 s->timeout = PFTM_GREv1_INITIATING;
5244 break;
5245 case IPPROTO_ESP:
5246 s->src.state = PFESPS_INITIATING;
5247 s->dst.state = PFESPS_NO_TRAFFIC;
5248 s->timeout = PFTM_ESP_FIRST_PACKET;
5249 break;
5250 #endif
5251 default:
5252 s->src.state = PFOTHERS_SINGLE;
5253 s->dst.state = PFOTHERS_NO_TRAFFIC;
5254 s->timeout = PFTM_OTHER_FIRST_PACKET;
5255 }
5256
5257 s->creation = pf_time_second();
5258 s->expire = pf_time_second();
5259
5260 if (sn != NULL) {
5261 s->src_node = sn;
5262 s->src_node->states++;
5263 VERIFY(s->src_node->states != 0);
5264 }
5265 if (nsn != NULL) {
5266 PF_ACPY(&nsn->raddr, &pd->naddr, af);
5267 s->nat_src_node = nsn;
5268 s->nat_src_node->states++;
5269 VERIFY(s->nat_src_node->states != 0);
5270 }
5271 if (pd->proto == IPPROTO_TCP) {
5272 if ((pd->flags & PFDESC_TCP_NORM) &&
5273 pf_normalize_tcp_init(m, off, pd, th, &s->src,
5274 &s->dst)) {
5275 REASON_SET(&reason, PFRES_MEMORY);
5276 pf_src_tree_remove_state(s);
5277 STATE_DEC_COUNTERS(s);
5278 pool_put(&pf_state_pl, s);
5279 return (PF_DROP);
5280 }
5281 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
5282 pf_normalize_tcp_stateful(m, off, pd, &reason,
5283 th, s, &s->src, &s->dst, &rewrite)) {
5284 /* This really shouldn't happen!!! */
5285 DPFPRINTF(PF_DEBUG_URGENT,
5286 ("pf_normalize_tcp_stateful failed on "
5287 "first pkt"));
5288 pf_normalize_tcp_cleanup(s);
5289 pf_src_tree_remove_state(s);
5290 STATE_DEC_COUNTERS(s);
5291 pool_put(&pf_state_pl, s);
5292 return (PF_DROP);
5293 }
5294 }
5295
5296 if ((sk = pf_alloc_state_key(s)) == NULL) {
5297 REASON_SET(&reason, PFRES_MEMORY);
5298 goto cleanup;
5299 }
5300
5301 sk->proto = pd->proto;
5302 sk->direction = direction;
5303 sk->af = af;
5304 #ifndef NO_APPLE_EXTENSIONS
5305 if (pd->proto == IPPROTO_UDP) {
5306 if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
5307 ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
5308 sk->proto_variant = PF_EXTFILTER_APD;
5309 } else {
5310 sk->proto_variant = nr ? nr->extfilter :
5311 r->extfilter;
5312 if (sk->proto_variant < PF_EXTFILTER_APD)
5313 sk->proto_variant = PF_EXTFILTER_APD;
5314 }
5315 } else if (pd->proto == IPPROTO_GRE) {
5316 sk->proto_variant = pd->proto_variant;
5317 }
5318 #endif
5319 if (direction == PF_OUT) {
5320 PF_ACPY(&sk->gwy.addr, saddr, af);
5321 PF_ACPY(&sk->ext.addr, daddr, af);
5322 switch (pd->proto) {
5323 #ifndef NO_APPLE_EXTENSIONS
5324 case IPPROTO_UDP:
5325 sk->gwy.xport = sxport;
5326 sk->ext.xport = dxport;
5327 break;
5328 case IPPROTO_ESP:
5329 sk->gwy.xport.spi = 0;
5330 sk->ext.xport.spi = pd->hdr.esp->spi;
5331 break;
5332 #endif
5333 case IPPROTO_ICMP:
5334 #if INET6
5335 case IPPROTO_ICMPV6:
5336 #endif
5337 #ifndef NO_APPLE_EXTENSIONS
5338 sk->gwy.xport.port = nxport.port;
5339 sk->ext.xport.spi = 0;
5340 #else
5341 sk->gwy.port = nport;
5342 sk->ext.port = 0;
5343 #endif
5344 break;
5345 default:
5346 #ifndef NO_APPLE_EXTENSIONS
5347 sk->gwy.xport = sxport;
5348 sk->ext.xport = dxport;
5349 break;
5350 #else
5351 sk->gwy.port = sport;
5352 sk->ext.port = dport;
5353 #endif
5354 }
5355 #ifndef NO_APPLE_EXTENSIONS
5356 if (nr != NULL) {
5357 PF_ACPY(&sk->lan.addr, &pd->baddr, af);
5358 sk->lan.xport = bxport;
5359 } else {
5360 PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
5361 sk->lan.xport = sk->gwy.xport;
5362 }
5363 #else
5364 if (nr != NULL) {
5365 PF_ACPY(&sk->lan.addr, &pd->baddr, af);
5366 sk->lan.port = bport;
5367 } else {
5368 PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
5369 sk->lan.port = sk->gwy.port;
5370 }
5371 #endif
5372 } else {
5373 PF_ACPY(&sk->lan.addr, daddr, af);
5374 PF_ACPY(&sk->ext.addr, saddr, af);
5375 switch (pd->proto) {
5376 case IPPROTO_ICMP:
5377 #if INET6
5378 case IPPROTO_ICMPV6:
5379 #endif
5380 #ifndef NO_APPLE_EXTENSIONS
5381 sk->lan.xport = nxport;
5382 sk->ext.xport.spi = 0;
5383 #else
5384 sk->lan.port = nport;
5385 sk->ext.port = 0;
5386 #endif
5387 break;
5388 #ifndef NO_APPLE_EXTENSIONS
5389 case IPPROTO_ESP:
5390 sk->ext.xport.spi = 0;
5391 sk->lan.xport.spi = pd->hdr.esp->spi;
5392 break;
5393 default:
5394 sk->lan.xport = dxport;
5395 sk->ext.xport = sxport;
5396 break;
5397 #else
5398 default:
5399 sk->lan.port = dport;
5400 sk->ext.port = sport;
5401 #endif
5402 }
5403 #ifndef NO_APPLE_EXTENSIONS
5404 if (nr != NULL) {
5405 PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
5406 sk->gwy.xport = bxport;
5407 } else {
5408 PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
5409 sk->gwy.xport = sk->lan.xport;
5410 }
5411 }
5412 #else
5413 if (nr != NULL) {
5414 PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
5415 sk->gwy.port = bport;
5416 } else {
5417 PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
5418 sk->gwy.port = sk->lan.port;
5419 }
5420 }
5421 #endif
5422
5423 pf_set_rt_ifp(s, saddr); /* needs s->state_key set */
5424
5425 #ifndef NO_APPLE_EXTENSIONS
5426 m = pd->mp;
5427
5428 if (sk->app_state == 0) {
5429 switch (pd->proto) {
5430 case IPPROTO_TCP: {
5431 u_int16_t dport = (direction == PF_OUT) ?
5432 sk->ext.xport.port : sk->gwy.xport.port;
5433
5434 if (nr != NULL &&
5435 ntohs(dport) == PF_PPTP_PORT) {
5436 struct pf_app_state *as;
5437
5438 as = pool_get(&pf_app_state_pl,
5439 PR_WAITOK);
5440 if (!as) {
5441 REASON_SET(&reason,
5442 PFRES_MEMORY);
5443 goto cleanup;
5444 }
5445
5446 bzero(as, sizeof (*as));
5447 as->handler = pf_pptp_handler;
5448 as->compare_lan_ext = 0;
5449 as->compare_ext_gwy = 0;
5450 as->u.pptp.grev1_state = 0;
5451 sk->app_state = as;
5452 (void) hook_establish(&s->unlink_hooks,
5453 0, (hook_fn_t) pf_pptp_unlink, s);
5454 }
5455 break;
5456 }
5457
5458 case IPPROTO_UDP: {
5459 struct udphdr *uh = pd->hdr.udp;
5460
5461 if (nr != NULL &&
5462 ntohs(uh->uh_sport) == PF_IKE_PORT &&
5463 ntohs(uh->uh_dport) == PF_IKE_PORT) {
5464 struct pf_app_state *as;
5465
5466 as = pool_get(&pf_app_state_pl,
5467 PR_WAITOK);
5468 if (!as) {
5469 REASON_SET(&reason,
5470 PFRES_MEMORY);
5471 goto cleanup;
5472 }
5473
5474 bzero(as, sizeof (*as));
5475 as->compare_lan_ext = pf_ike_compare;
5476 as->compare_ext_gwy = pf_ike_compare;
5477 as->u.ike.cookie = ike.initiator_cookie;
5478 sk->app_state = as;
5479 }
5480 break;
5481 }
5482
5483 default:
5484 break;
5485 }
5486 }
5487 #endif
5488
5489 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
5490 if (pd->proto == IPPROTO_TCP)
5491 pf_normalize_tcp_cleanup(s);
5492 REASON_SET(&reason, PFRES_STATEINS);
5493 pf_src_tree_remove_state(s);
5494 STATE_DEC_COUNTERS(s);
5495 pool_put(&pf_state_pl, s);
5496 return (PF_DROP);
5497 } else
5498 *sm = s;
5499 if (tag > 0) {
5500 pf_tag_ref(tag);
5501 s->tag = tag;
5502 }
5503 if (pd->proto == IPPROTO_TCP &&
5504 (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
5505 r->keep_state == PF_STATE_SYNPROXY) {
5506 s->src.state = PF_TCPS_PROXY_SRC;
5507 if (nr != NULL) {
5508 #ifndef NO_APPLE_EXTENSIONS
5509 if (direction == PF_OUT) {
5510 pf_change_ap(direction, pd->mp, saddr,
5511 &th->th_sport, pd->ip_sum,
5512 &th->th_sum, &pd->baddr,
5513 bxport.port, 0, af);
5514 sxport.port = th->th_sport;
5515 } else {
5516 pf_change_ap(direction, pd->mp, daddr,
5517 &th->th_dport, pd->ip_sum,
5518 &th->th_sum, &pd->baddr,
5519 bxport.port, 0, af);
5520 sxport.port = th->th_dport;
5521 }
5522 #else
5523 if (direction == PF_OUT) {
5524 pf_change_ap(saddr, &th->th_sport,
5525 pd->ip_sum, &th->th_sum, &pd->baddr,
5526 bport, 0, af);
5527 sport = th->th_sport;
5528 } else {
5529 pf_change_ap(daddr, &th->th_dport,
5530 pd->ip_sum, &th->th_sum, &pd->baddr,
5531 bport, 0, af);
5532 sport = th->th_dport;
5533 }
5534 #endif
5535 }
5536 s->src.seqhi = htonl(random());
5537 /* Find mss option */
5538 mss = pf_get_mss(m, off, th->th_off, af);
5539 mss = pf_calc_mss(saddr, af, mss);
5540 mss = pf_calc_mss(daddr, af, mss);
5541 s->src.mss = mss;
5542 pf_send_tcp(r, af, daddr, saddr, th->th_dport,
5543 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
5544 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
5545 REASON_SET(&reason, PFRES_SYNPROXY);
5546 return (PF_SYNPROXY_DROP);
5547 }
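/*
 * For a SYN that matched a synproxy rule, the block above answers the
 * client itself: it picks a random server-side ISN (s->src.seqhi),
 * derives an MSS bounded by both endpoints' routes, sends the SYN+ACK
 * and returns PF_SYNPROXY_DROP, so nothing is forwarded until the
 * client's final ACK is validated against these values in
 * pf_test_state_tcp().
 */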
5548
5549 #ifndef NO_APPLE_EXTENSIONS
5550 if (sk->app_state && sk->app_state->handler) {
5551 int offx = off;
5552
5553 switch (pd->proto) {
5554 case IPPROTO_TCP:
5555 offx += th->th_off << 2;
5556 break;
5557 case IPPROTO_UDP:
5558 offx += pd->hdr.udp->uh_ulen << 2;
5559 break;
5560 default:
5561 /* ALG handlers only apply to TCP and UDP rules */
5562 break;
5563 }
5564
5565 if (offx > off) {
5566 sk->app_state->handler(s, direction, offx,
5567 pd, kif);
5568 if (pd->lmw < 0) {
5569 REASON_SET(&reason, PFRES_MEMORY);
5570 return (PF_DROP);
5571 }
5572 m = pd->mp;
5573 }
5574 }
5575 #endif
5576 }
5577
5578 /* copy back packet headers if we performed NAT operations */
5579 #ifndef NO_APPLE_EXTENSIONS
5580 if (rewrite) {
5581 if (rewrite < off + hdrlen)
5582 rewrite = off + hdrlen;
5583
5584 m = pf_lazy_makewritable(pd, pd->mp, rewrite);
5585 if (!m) {
5586 REASON_SET(&reason, PFRES_MEMORY);
5587 return (PF_DROP);
5588 }
5589
5590 m_copyback(m, off, hdrlen, pd->hdr.any);
5591 }
5592 #else
5593 if (rewrite)
5594 m_copyback(m, off, hdrlen, pd->hdr.any);
5595 #endif
5596
5597 return (PF_PASS);
5598 }
5599
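/*
 * Orientation of the state key built above: for an outbound connection
 * 'lan' holds the pre-NAT source, 'gwy' the translated source and 'ext'
 * the remote peer; for an inbound connection 'lan' is the internal
 * destination, 'gwy' the address the packet actually arrived on and
 * 'ext' the remote source.  When no translation rule applied, lan and
 * gwy simply mirror each other, which is what the PF_ACPY fallback
 * branches arrange.
 */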
5600 static int
5601 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
5602 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
5603 struct pf_ruleset **rsm)
5604 {
5605 #pragma unused(h)
5606 struct pf_rule *r, *a = NULL;
5607 struct pf_ruleset *ruleset = NULL;
5608 sa_family_t af = pd->af;
5609 u_short reason;
5610 int tag = -1;
5611 int asd = 0;
5612 int match = 0;
5613
5614 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
5615 while (r != NULL) {
5616 r->evaluations++;
5617 if (pfi_kif_match(r->kif, kif) == r->ifnot)
5618 r = r->skip[PF_SKIP_IFP].ptr;
5619 else if (r->direction && r->direction != direction)
5620 r = r->skip[PF_SKIP_DIR].ptr;
5621 else if (r->af && r->af != af)
5622 r = r->skip[PF_SKIP_AF].ptr;
5623 else if (r->proto && r->proto != pd->proto)
5624 r = r->skip[PF_SKIP_PROTO].ptr;
5625 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
5626 r->src.neg, kif))
5627 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
5628 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
5629 r->dst.neg, NULL))
5630 r = r->skip[PF_SKIP_DST_ADDR].ptr;
5631 else if (r->tos && !(r->tos == pd->tos))
5632 r = TAILQ_NEXT(r, entries);
5633 else if (r->os_fingerprint != PF_OSFP_ANY)
5634 r = TAILQ_NEXT(r, entries);
5635 #ifndef NO_APPLE_EXTENSIONS
5636 else if (pd->proto == IPPROTO_UDP &&
5637 (r->src.xport.range.op || r->dst.xport.range.op))
5638 r = TAILQ_NEXT(r, entries);
5639 else if (pd->proto == IPPROTO_TCP &&
5640 (r->src.xport.range.op || r->dst.xport.range.op ||
5641 r->flagset))
5642 r = TAILQ_NEXT(r, entries);
5643 #else
5644 else if (pd->proto == IPPROTO_UDP &&
5645 (r->src.port_op || r->dst.port_op))
5646 r = TAILQ_NEXT(r, entries);
5647 else if (pd->proto == IPPROTO_TCP &&
5648 (r->src.port_op || r->dst.port_op || r->flagset))
5649 r = TAILQ_NEXT(r, entries);
5650 #endif
5651 else if ((pd->proto == IPPROTO_ICMP ||
5652 pd->proto == IPPROTO_ICMPV6) &&
5653 (r->type || r->code))
5654 r = TAILQ_NEXT(r, entries);
5655 else if (r->prob && r->prob <= (random() % (UINT_MAX - 1) + 1))
5656 r = TAILQ_NEXT(r, entries);
5657 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
5658 r = TAILQ_NEXT(r, entries);
5659 else {
5660 if (r->anchor == NULL) {
5661 match = 1;
5662 *rm = r;
5663 *am = a;
5664 *rsm = ruleset;
5665 if ((*rm)->quick)
5666 break;
5667 r = TAILQ_NEXT(r, entries);
5668 } else
5669 pf_step_into_anchor(&asd, &ruleset,
5670 PF_RULESET_FILTER, &r, &a, &match);
5671 }
5672 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
5673 PF_RULESET_FILTER, &r, &a, &match))
5674 break;
5675 }
5676 r = *rm;
5677 a = *am;
5678 ruleset = *rsm;
5679
5680 REASON_SET(&reason, PFRES_MATCH);
5681
5682 if (r->log)
5683 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
5684 pd);
5685
5686 if (r->action != PF_PASS)
5687 return (PF_DROP);
5688
5689 if (pf_tag_packet(m, pd->pf_mtag, tag, -1)) {
5690 REASON_SET(&reason, PFRES_MEMORY);
5691 return (PF_DROP);
5692 }
5693
5694 return (PF_PASS);
5695 }
5696
5697 #ifndef NO_APPLE_EXTENSIONS
5698 static void
5699 pf_pptp_handler(struct pf_state *s, int direction, int off,
5700 struct pf_pdesc *pd, struct pfi_kif *kif)
5701 {
5702 #pragma unused(direction)
5703 struct tcphdr *th;
5704 struct pf_pptp_state *pptps;
5705 struct pf_pptp_ctrl_msg cm;
5706 size_t plen;
5707 struct pf_state *gs;
5708 u_int16_t ct;
5709 u_int16_t *pac_call_id;
5710 u_int16_t *pns_call_id;
5711 u_int16_t *spoof_call_id;
5712 u_int8_t *pac_state;
5713 u_int8_t *pns_state;
5714 enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
5715 struct mbuf *m;
5716 struct pf_state_key *sk;
5717 struct pf_state_key *gsk;
5718 struct pf_app_state *gas;
5719
5720 sk = s->state_key;
5721 pptps = &sk->app_state->u.pptp;
5722 gs = pptps->grev1_state;
5723
5724 if (gs)
5725 gs->expire = pf_time_second();
5726
5727 m = pd->mp;
5728 plen = min(sizeof (cm), m->m_pkthdr.len - off);
5729 if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
5730 return;
5731
5732 m_copydata(m, off, plen, &cm);
5733
5734 if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
5735 return;
5736 if (ntohs(cm.hdr.type) != 1)
5737 return;
5738
5739 if (!gs) {
5740 gs = pool_get(&pf_state_pl, PR_WAITOK);
5741 if (!gs)
5742 return;
5743
5744 memcpy(gs, s, sizeof (*gs));
5745
5746 memset(&gs->entry_id, 0, sizeof (gs->entry_id));
5747 memset(&gs->entry_list, 0, sizeof (gs->entry_list));
5748
5749 TAILQ_INIT(&gs->unlink_hooks);
5750 gs->rt_kif = NULL;
5751 gs->creation = 0;
5752 gs->pfsync_time = 0;
5753 gs->packets[0] = gs->packets[1] = 0;
5754 gs->bytes[0] = gs->bytes[1] = 0;
5755 gs->timeout = PFTM_UNLINKED;
5756 gs->id = gs->creatorid = 0;
5757 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
5758 gs->src.scrub = gs->dst.scrub = 0;
5759
5760 gas = pool_get(&pf_app_state_pl, PR_NOWAIT);
5761 if (!gas) {
5762 pool_put(&pf_state_pl, gs);
5763 return;
5764 }
5765
5766 gsk = pf_alloc_state_key(gs);
5767 if (!gsk) {
5768 pool_put(&pf_app_state_pl, gas);
5769 pool_put(&pf_state_pl, gs);
5770 return;
5771 }
5772
5773 memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan));
5774 memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy));
5775 memcpy(&gsk->ext, &sk->ext, sizeof (gsk->ext));
5776 gsk->af = sk->af;
5777 gsk->proto = IPPROTO_GRE;
5778 gsk->proto_variant = PF_GRE_PPTP_VARIANT;
5779 gsk->app_state = gas;
5780 gsk->lan.xport.call_id = 0;
5781 gsk->gwy.xport.call_id = 0;
5782 gsk->ext.xport.call_id = 0;
5783 memset(gas, 0, sizeof (*gas));
5784 gas->u.grev1.pptp_state = s;
5785 STATE_INC_COUNTERS(gs);
5786 pptps->grev1_state = gs;
5787 (void) hook_establish(&gs->unlink_hooks, 0,
5788 (hook_fn_t) pf_grev1_unlink, gs);
5789 } else {
5790 gsk = gs->state_key;
5791 }
5792
5793 switch (sk->direction) {
5794 case PF_IN:
5795 pns_call_id = &gsk->ext.xport.call_id;
5796 pns_state = &gs->dst.state;
5797 pac_call_id = &gsk->lan.xport.call_id;
5798 pac_state = &gs->src.state;
5799 break;
5800
5801 case PF_OUT:
5802 pns_call_id = &gsk->lan.xport.call_id;
5803 pns_state = &gs->src.state;
5804 pac_call_id = &gsk->ext.xport.call_id;
5805 pac_state = &gs->dst.state;
5806 break;
5807
5808 default:
5809 DPFPRINTF(PF_DEBUG_URGENT,
5810 ("pf_pptp_handler: bad directional!\n"));
5811 return;
5812 }
5813
5814 spoof_call_id = 0;
5815 op = PF_PPTP_PASS;
5816
5817 ct = ntohs(cm.ctrl.type);
5818
5819 switch (ct) {
5820 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ:
5821 *pns_call_id = cm.msg.call_out_req.call_id;
5822 *pns_state = PFGRE1S_INITIATING;
5823 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5824 spoof_call_id = &cm.msg.call_out_req.call_id;
5825 break;
5826
5827 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY:
5828 *pac_call_id = cm.msg.call_out_rpy.call_id;
5829 if (s->nat_rule.ptr)
5830 spoof_call_id =
5831 (pac_call_id == &gsk->lan.xport.call_id) ?
5832 &cm.msg.call_out_rpy.call_id :
5833 &cm.msg.call_out_rpy.peer_call_id;
5834 if (gs->timeout == PFTM_UNLINKED) {
5835 *pac_state = PFGRE1S_INITIATING;
5836 op = PF_PPTP_INSERT_GRE;
5837 }
5838 break;
5839
5840 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST:
5841 *pns_call_id = cm.msg.call_in_1st.call_id;
5842 *pns_state = PFGRE1S_INITIATING;
5843 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5844 spoof_call_id = &cm.msg.call_in_1st.call_id;
5845 break;
5846
5847 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND:
5848 *pac_call_id = cm.msg.call_in_2nd.call_id;
5849 *pac_state = PFGRE1S_INITIATING;
5850 if (s->nat_rule.ptr)
5851 spoof_call_id =
5852 (pac_call_id == &gsk->lan.xport.call_id) ?
5853 &cm.msg.call_in_2nd.call_id :
5854 &cm.msg.call_in_2nd.peer_call_id;
5855 break;
5856
5857 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD:
5858 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5859 spoof_call_id = &cm.msg.call_in_3rd.call_id;
5860 if (cm.msg.call_in_3rd.call_id != *pns_call_id) {
5861 break;
5862 }
5863 if (gs->timeout == PFTM_UNLINKED)
5864 op = PF_PPTP_INSERT_GRE;
5865 break;
5866
5867 case PF_PPTP_CTRL_TYPE_CALL_CLR:
5868 if (cm.msg.call_clr.call_id != *pns_call_id)
5869 op = PF_PPTP_REMOVE_GRE;
5870 break;
5871
5872 case PF_PPTP_CTRL_TYPE_CALL_DISC:
5873 if (cm.msg.call_clr.call_id != *pac_call_id)
5874 op = PF_PPTP_REMOVE_GRE;
5875 break;
5876
5877 case PF_PPTP_CTRL_TYPE_ERROR:
5878 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5879 spoof_call_id = &cm.msg.error.peer_call_id;
5880 break;
5881
5882 case PF_PPTP_CTRL_TYPE_SET_LINKINFO:
5883 if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id)
5884 spoof_call_id = &cm.msg.set_linkinfo.peer_call_id;
5885 break;
5886
5887 default:
5888 op = PF_PPTP_PASS;
5889 break;
5890 }
5891
5892 if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
5893 gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
5894 if (spoof_call_id) {
5895 u_int16_t call_id = 0;
5896 int n = 0;
5897 struct pf_state_key_cmp key;
5898
5899 key.af = gsk->af;
5900 key.proto = IPPROTO_GRE;
5901 key.proto_variant = PF_GRE_PPTP_VARIANT;
5902 PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af);
5903 PF_ACPY(&key.ext.addr, &gsk->ext.addr, key.af);
5904 key.gwy.xport.call_id = gsk->gwy.xport.call_id;
5905 key.ext.xport.call_id = gsk->ext.xport.call_id;
5906 do {
5907 call_id = htonl(random());
5908 } while (!call_id);
5909
5910 while (pf_find_state_all(&key, PF_IN, 0)) {
5911 call_id = ntohs(call_id);
5912 --call_id;
5913 if (call_id == 0) call_id = 0xffff;
5914 call_id = htons(call_id);
5915
5916 key.gwy.xport.call_id = call_id;
5917
5918 if (++n > 65535) {
5919 DPFPRINTF(PF_DEBUG_URGENT,
5920 ("pf_pptp_handler: failed to spoof "
5921 "call id\n"));
5922 key.gwy.xport.call_id = 0;
5923 break;
5924 }
5925 }
5926
5927 gsk->gwy.xport.call_id = call_id;
5928 }
5929 }
5930
5931 th = pd->hdr.tcp;
5932
5933 if (spoof_call_id && gsk->lan.xport.call_id != gsk->gwy.xport.call_id) {
5934 if (*spoof_call_id == gsk->gwy.xport.call_id) {
5935 *spoof_call_id = gsk->lan.xport.call_id;
5936 th->th_sum = pf_cksum_fixup(th->th_sum,
5937 gsk->gwy.xport.call_id, gsk->lan.xport.call_id, 0);
5938 } else {
5939 *spoof_call_id = gsk->gwy.xport.call_id;
5940 th->th_sum = pf_cksum_fixup(th->th_sum,
5941 gsk->lan.xport.call_id, gsk->gwy.xport.call_id, 0);
5942 }
5943
5944 m = pf_lazy_makewritable(pd, m, off + plen);
5945 if (!m) {
5946 pptps->grev1_state = NULL;
5947 STATE_DEC_COUNTERS(gs);
5948 pool_put(&pf_state_pl, gs);
5949 return;
5950 }
5951 m_copyback(m, off, plen, &cm);
5952 }
5953
5954 switch (op) {
5955 case PF_PPTP_REMOVE_GRE:
5956 gs->timeout = PFTM_PURGE;
5957 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
5958 gsk->lan.xport.call_id = 0;
5959 gsk->gwy.xport.call_id = 0;
5960 gsk->ext.xport.call_id = 0;
5961 gs->id = gs->creatorid = 0;
5962 break;
5963
5964 case PF_PPTP_INSERT_GRE:
5965 gs->creation = pf_time_second();
5966 gs->expire = pf_time_second();
5967 gs->timeout = PFTM_TCP_ESTABLISHED;
5968 if (gs->src_node != NULL) {
5969 ++gs->src_node->states;
5970 VERIFY(gs->src_node->states != 0);
5971 }
5972 if (gs->nat_src_node != NULL) {
5973 ++gs->nat_src_node->states;
5974 VERIFY(gs->nat_src_node->states != 0);
5975 }
5976 pf_set_rt_ifp(gs, &sk->lan.addr);
5977 if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) {
5978
5979 /*
5980 * <jhw@apple.com>
5981 * FIX ME: insertion can fail when multiple PNS
5982 * behind the same NAT open calls to the same PAC
5983 * simultaneously because spoofed call ID numbers
5984 * are chosen before states are inserted. This is
5985 * hard to fix and happens infrequently enough that
5986 * users will normally try again and this ALG will
5987 * succeed. Failures are expected to be rare enough
5988 * that fixing this is a low priority.
5989 */
5990 pptps->grev1_state = NULL;
5991 pd->lmw = -1; /* Force PF_DROP on PFRES_MEMORY */
5992 pf_src_tree_remove_state(gs);
5993 STATE_DEC_COUNTERS(gs);
5994 pool_put(&pf_state_pl, gs);
5995 DPFPRINTF(PF_DEBUG_URGENT, ("pf_pptp_handler: error "
5996 "inserting GREv1 state.\n"));
5997 }
5998 break;
5999
6000 default:
6001 break;
6002 }
6003 }
6004
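/*
 * Notes on the call-ID spoofing above: a random 16-bit call ID is chosen
 * and then linearly probed (wrapping at 0xffff) until pf_find_state_all()
 * reports no clash on the gateway side, giving up after 65535 attempts.
 * The substitution itself only touches one 16-bit field of the control
 * message, so the TCP checksum is patched incrementally with
 * pf_cksum_fixup() and the message is written back with m_copyback() on
 * an mbuf made writable by pf_lazy_makewritable().
 */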
6005 static void
6006 pf_pptp_unlink(struct pf_state *s)
6007 {
6008 struct pf_app_state *as = s->state_key->app_state;
6009 struct pf_state *grev1s = as->u.pptp.grev1_state;
6010
6011 if (grev1s) {
6012 struct pf_app_state *gas = grev1s->state_key->app_state;
6013
6014 if (grev1s->timeout < PFTM_MAX)
6015 grev1s->timeout = PFTM_PURGE;
6016 gas->u.grev1.pptp_state = NULL;
6017 as->u.pptp.grev1_state = NULL;
6018 }
6019 }
6020
6021 static void
6022 pf_grev1_unlink(struct pf_state *s)
6023 {
6024 struct pf_app_state *as = s->state_key->app_state;
6025 struct pf_state *pptps = as->u.grev1.pptp_state;
6026
6027 if (pptps) {
6028 struct pf_app_state *pas = pptps->state_key->app_state;
6029
6030 pas->u.pptp.grev1_state = NULL;
6031 as->u.grev1.pptp_state = NULL;
6032 }
6033 }
6034
6035 static int
6036 pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)
6037 {
6038 int64_t d = a->u.ike.cookie - b->u.ike.cookie;
6039 return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
6040 }
6041 #endif
6042
6043 static int
6044 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
6045 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
6046 u_short *reason)
6047 {
6048 #pragma unused(h)
6049 struct pf_state_key_cmp key;
6050 struct tcphdr *th = pd->hdr.tcp;
6051 u_int16_t win = ntohs(th->th_win);
6052 u_int32_t ack, end, seq, orig_seq;
6053 u_int8_t sws, dws;
6054 int ackskew;
6055 int copyback = 0;
6056 struct pf_state_peer *src, *dst;
6057
6058 #ifndef NO_APPLE_EXTENSIONS
6059 key.app_state = 0;
6060 #endif
6061 key.af = pd->af;
6062 key.proto = IPPROTO_TCP;
6063 if (direction == PF_IN) {
6064 PF_ACPY(&key.ext.addr, pd->src, key.af);
6065 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
6066 #ifndef NO_APPLE_EXTENSIONS
6067 key.ext.xport.port = th->th_sport;
6068 key.gwy.xport.port = th->th_dport;
6069 #else
6070 key.ext.port = th->th_sport;
6071 key.gwy.port = th->th_dport;
6072 #endif
6073 } else {
6074 PF_ACPY(&key.lan.addr, pd->src, key.af);
6075 PF_ACPY(&key.ext.addr, pd->dst, key.af);
6076 #ifndef NO_APPLE_EXTENSIONS
6077 key.lan.xport.port = th->th_sport;
6078 key.ext.xport.port = th->th_dport;
6079 #else
6080 key.lan.port = th->th_sport;
6081 key.ext.port = th->th_dport;
6082 #endif
6083 }
6084
6085 STATE_LOOKUP();
6086
6087 if (direction == (*state)->state_key->direction) {
6088 src = &(*state)->src;
6089 dst = &(*state)->dst;
6090 } else {
6091 src = &(*state)->dst;
6092 dst = &(*state)->src;
6093 }
6094
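/*
 * TCP SYN proxy (a sketch of the logic below): while src.state is
 * PROXY_SRC, pf itself completes the three-way handshake with the
 * connecting host, using src.seqhi as its ISN; once the final ACK
 * arrives the state moves to PROXY_DST and pf opens the connection
 * to the real destination, after which the two sequence spaces are
 * reconciled through the seqdiff fields.
 */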
6095 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
6096 if (direction != (*state)->state_key->direction) {
6097 REASON_SET(reason, PFRES_SYNPROXY);
6098 return (PF_SYNPROXY_DROP);
6099 }
6100 if (th->th_flags & TH_SYN) {
6101 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
6102 REASON_SET(reason, PFRES_SYNPROXY);
6103 return (PF_DROP);
6104 }
6105 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
6106 pd->src, th->th_dport, th->th_sport,
6107 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
6108 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
6109 0, NULL, NULL);
6110 REASON_SET(reason, PFRES_SYNPROXY);
6111 return (PF_SYNPROXY_DROP);
6112 } else if (!(th->th_flags & TH_ACK) ||
6113 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
6114 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
6115 REASON_SET(reason, PFRES_SYNPROXY);
6116 return (PF_DROP);
6117 } else if ((*state)->src_node != NULL &&
6118 pf_src_connlimit(state)) {
6119 REASON_SET(reason, PFRES_SRCLIMIT);
6120 return (PF_DROP);
6121 } else
6122 (*state)->src.state = PF_TCPS_PROXY_DST;
6123 }
6124 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
6125 struct pf_state_host *psrc, *pdst;
6126
6127 if (direction == PF_OUT) {
6128 psrc = &(*state)->state_key->gwy;
6129 pdst = &(*state)->state_key->ext;
6130 } else {
6131 psrc = &(*state)->state_key->ext;
6132 pdst = &(*state)->state_key->lan;
6133 }
6134 if (direction == (*state)->state_key->direction) {
6135 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
6136 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
6137 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
6138 REASON_SET(reason, PFRES_SYNPROXY);
6139 return (PF_DROP);
6140 }
6141 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
6142 if ((*state)->dst.seqhi == 1)
6143 (*state)->dst.seqhi = htonl(random());
6144 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
6145 #ifndef NO_APPLE_EXTENSIONS
6146 &pdst->addr, psrc->xport.port, pdst->xport.port,
6147 #else
6148 &pdst->addr, psrc->port, pdst->port,
6149 #endif
6150 (*state)->dst.seqhi, 0, TH_SYN, 0,
6151 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
6152 REASON_SET(reason, PFRES_SYNPROXY);
6153 return (PF_SYNPROXY_DROP);
6154 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
6155 (TH_SYN|TH_ACK)) ||
6156 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
6157 REASON_SET(reason, PFRES_SYNPROXY);
6158 return (PF_DROP);
6159 } else {
6160 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
6161 (*state)->dst.seqlo = ntohl(th->th_seq);
6162 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
6163 pd->src, th->th_dport, th->th_sport,
6164 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
6165 TH_ACK, (*state)->src.max_win, 0, 0, 0,
6166 (*state)->tag, NULL, NULL);
6167 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
6168 #ifndef NO_APPLE_EXTENSIONS
6169 &pdst->addr, psrc->xport.port, pdst->xport.port,
6170 #else
6171 &pdst->addr, psrc->port, pdst->port,
6172 #endif
6173 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
6174 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
6175 0, NULL, NULL);
6176 (*state)->src.seqdiff = (*state)->dst.seqhi -
6177 (*state)->src.seqlo;
6178 (*state)->dst.seqdiff = (*state)->src.seqhi -
6179 (*state)->dst.seqlo;
6180 (*state)->src.seqhi = (*state)->src.seqlo +
6181 (*state)->dst.max_win;
6182 (*state)->dst.seqhi = (*state)->dst.seqlo +
6183 (*state)->src.max_win;
6184 (*state)->src.wscale = (*state)->dst.wscale = 0;
6185 (*state)->src.state = (*state)->dst.state =
6186 TCPS_ESTABLISHED;
6187 REASON_SET(reason, PFRES_SYNPROXY);
6188 return (PF_SYNPROXY_DROP);
6189 }
6190 }
6191
6192 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
6193 dst->state >= TCPS_FIN_WAIT_2 &&
6194 src->state >= TCPS_FIN_WAIT_2) {
6195 if (pf_status.debug >= PF_DEBUG_MISC) {
6196 printf("pf: state reuse ");
6197 pf_print_state(*state);
6198 pf_print_flags(th->th_flags);
6199 printf("\n");
6200 }
6201 /* XXX make sure it's the same direction ?? */
6202 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
6203 pf_unlink_state(*state);
6204 *state = NULL;
6205 return (PF_DROP);
6206 }
6207
6208 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
6209 sws = src->wscale & PF_WSCALE_MASK;
6210 dws = dst->wscale & PF_WSCALE_MASK;
6211 } else
6212 sws = dws = 0;
6213
6214 /*
6215 * Sequence tracking algorithm from Guido van Rooij's paper:
6216 * http://www.madison-gurkha.com/publications/tcp_filtering/
6217 * tcp_filtering.ps
6218 */
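/*
 * Roughly: for each peer we keep seqlo, the highest sequence number
 * seen from that peer, and seqhi, an upper bound on what it may send
 * next, derived from the other peer's advertised window (max_win
 * shifted by its window scale).  A segment is accepted only if it
 * fits inside that window and its ACK stays within roughly
 * MAXACKWINDOW of the other peer's seqlo; see the checks below.
 */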
6219
6220 orig_seq = seq = ntohl(th->th_seq);
6221 if (src->seqlo == 0) {
6222 /* First packet from this end. Set its state */
6223
6224 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
6225 src->scrub == NULL) {
6226 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
6227 REASON_SET(reason, PFRES_MEMORY);
6228 return (PF_DROP);
6229 }
6230 }
6231
6232 /* Deferred generation of sequence number modulator */
6233 if (dst->seqdiff && !src->seqdiff) {
6234 /* use random iss for the TCP server */
6235 while ((src->seqdiff = random() - seq) == 0)
6236 ;
6237 ack = ntohl(th->th_ack) - dst->seqdiff;
6238 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6239 src->seqdiff), 0);
6240 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6241 copyback = off + sizeof (*th);
6242 } else {
6243 ack = ntohl(th->th_ack);
6244 }
6245
6246 end = seq + pd->p_len;
6247 if (th->th_flags & TH_SYN) {
6248 end++;
6249 if (dst->wscale & PF_WSCALE_FLAG) {
6250 src->wscale = pf_get_wscale(m, off, th->th_off,
6251 pd->af);
6252 if (src->wscale & PF_WSCALE_FLAG) {
6253 /*
6254 * Remove scale factor from initial
6255 * window
6256 */
6257 sws = src->wscale & PF_WSCALE_MASK;
6258 win = ((u_int32_t)win + (1 << sws) - 1)
6259 >> sws;
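/*
 * e.g. a SYN advertising win 65535 with wscale 3 is
 * stored as (65535 + 7) >> 3 = 8192, so that
 * (max_win << 3) later reconstructs the real window.
 */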
6260 dws = dst->wscale & PF_WSCALE_MASK;
6261 } else {
6262 #ifndef NO_APPLE_MODIFICATION
6263 /*
6264 * <rdar://5786370>
6265 *
6266 * Window scale negotiation has failed,
6267 * therefore we must restore the window
6268 * scale in the state record that we
6269 * optimistically removed in
6270 * pf_test_rule(). Care is required to
6271 * prevent arithmetic overflow from
6272 * zeroing the window when it's
6273 * truncated down to 16-bits. --jhw
6274 */
6275 u_int32_t max_win = dst->max_win;
6276 max_win <<=
6277 dst->wscale & PF_WSCALE_MASK;
6278 dst->max_win = MIN(0xffff, max_win);
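/*
 * e.g. a max_win of 0x2000 stored assuming wscale 3
 * restores to 0x10000; without the MIN() clamp the
 * 16-bit max_win field would wrap to 0.
 */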
6279 #else
6280 /* fixup other window */
6281 dst->max_win <<= dst->wscale &
6282 PF_WSCALE_MASK;
6283 #endif
6284 /* in case of a retrans SYN|ACK */
6285 dst->wscale = 0;
6286 }
6287 }
6288 }
6289 if (th->th_flags & TH_FIN)
6290 end++;
6291
6292 src->seqlo = seq;
6293 if (src->state < TCPS_SYN_SENT)
6294 src->state = TCPS_SYN_SENT;
6295
6296 /*
6297 * May need to slide the window (seqhi may have been set by
6298 * the crappy stack check or if we picked up the connection
6299 * after establishment)
6300 */
6301 #ifndef NO_APPLE_MODIFICATIONS
6302 if (src->seqhi == 1 ||
6303 SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws),
6304 src->seqhi))
6305 src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws);
6306 #else
6307 if (src->seqhi == 1 ||
6308 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
6309 src->seqhi = end + MAX(1, dst->max_win << dws);
6310 #endif
6311 if (win > src->max_win)
6312 src->max_win = win;
6313
6314 } else {
6315 ack = ntohl(th->th_ack) - dst->seqdiff;
6316 if (src->seqdiff) {
6317 /* Modulate sequence numbers */
6318 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6319 src->seqdiff), 0);
6320 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6321 copyback = off + sizeof (*th);
6322 }
6323 end = seq + pd->p_len;
6324 if (th->th_flags & TH_SYN)
6325 end++;
6326 if (th->th_flags & TH_FIN)
6327 end++;
6328 }
6329
6330 if ((th->th_flags & TH_ACK) == 0) {
6331 /* Let it pass through the ack skew check */
6332 ack = dst->seqlo;
6333 } else if ((ack == 0 &&
6334 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
6335 /* broken tcp stacks do not set ack */
6336 (dst->state < TCPS_SYN_SENT)) {
6337 /*
6338 * Many stacks (ours included) will set the ACK number in an
6339 * FIN|ACK if the SYN times out -- no sequence to ACK.
6340 */
6341 ack = dst->seqlo;
6342 }
6343
6344 if (seq == end) {
6345 /* Ease sequencing restrictions on no data packets */
6346 seq = src->seqlo;
6347 end = seq;
6348 }
6349
6350 ackskew = dst->seqlo - ack;
6351
6352
6353 /*
6354 * Need to demodulate the sequence numbers in any TCP SACK options
6355 * (Selective ACK). We could optionally validate the SACK values
6356 * against the current ACK window, either forwards or backwards, but
6357 * I'm not confident that SACK has been implemented properly
6358 * everywhere. It wouldn't surprise me if several stacks accidentally
6359 * SACK too far backwards of previously ACKed data. There really aren't
6360 * any security implications of bad SACKing unless the target stack
6361 * doesn't validate the option length correctly. Someone trying to
6362 * spoof into a TCP connection won't bother blindly sending SACK
6363 * options anyway.
6364 */
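/*
 * Each SACK edge is shifted by dst->seqdiff, mirroring the th_ack
 * adjustment above; e.g. a block [s, e) on the wire is rewritten
 * as [s - seqdiff, e - seqdiff) before being passed on.
 */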
6365 if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
6366 #ifndef NO_APPLE_EXTENSIONS
6367 copyback = pf_modulate_sack(m, off, pd, th, dst);
6368 if (copyback == -1) {
6369 REASON_SET(reason, PFRES_MEMORY);
6370 return (PF_DROP);
6371 }
6372
6373 m = pd->mp;
6374 #else
6375 if (pf_modulate_sack(m, off, pd, th, dst))
6376 copyback = 1;
6377 #endif
6378 }
6379
6380
6381 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
6382 if (SEQ_GEQ(src->seqhi, end) &&
6383 /* Last octet inside other's window space */
6384 #ifndef NO_APPLE_MODIFICATIONS
6385 SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
6386 #else
6387 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
6388 #endif
6389 /* Retrans: not more than one window back */
6390 (ackskew >= -MAXACKWINDOW) &&
6391 /* Acking not more than one reassembled fragment backwards */
6392 (ackskew <= (MAXACKWINDOW << sws)) &&
6393 /* Acking not more than one window forward */
6394 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
6395 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
6396 (pd->flags & PFDESC_IP_REAS) == 0)) {
6397 /* Require an exact/+1 sequence match on resets when possible */
6398
6399 if (dst->scrub || src->scrub) {
6400 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
6401 *state, src, dst, &copyback))
6402 return (PF_DROP);
6403
6404 #ifndef NO_APPLE_EXTENSIONS
6405 m = pd->mp;
6406 #endif
6407 }
6408
6409 /* update max window */
6410 if (src->max_win < win)
6411 src->max_win = win;
6412 /* synchronize sequencing */
6413 if (SEQ_GT(end, src->seqlo))
6414 src->seqlo = end;
6415 /* slide the window of what the other end can send */
6416 #ifndef NO_APPLE_MODIFICATIONS
6417 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6418 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
6419 #else
6420 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
6421 dst->seqhi = ack + MAX((win << sws), 1);
6422 #endif
6423
6424 /* update states */
6425 if (th->th_flags & TH_SYN)
6426 if (src->state < TCPS_SYN_SENT)
6427 src->state = TCPS_SYN_SENT;
6428 if (th->th_flags & TH_FIN)
6429 if (src->state < TCPS_CLOSING)
6430 src->state = TCPS_CLOSING;
6431 if (th->th_flags & TH_ACK) {
6432 if (dst->state == TCPS_SYN_SENT) {
6433 dst->state = TCPS_ESTABLISHED;
6434 if (src->state == TCPS_ESTABLISHED &&
6435 (*state)->src_node != NULL &&
6436 pf_src_connlimit(state)) {
6437 REASON_SET(reason, PFRES_SRCLIMIT);
6438 return (PF_DROP);
6439 }
6440 } else if (dst->state == TCPS_CLOSING)
6441 dst->state = TCPS_FIN_WAIT_2;
6442 }
6443 if (th->th_flags & TH_RST)
6444 src->state = dst->state = TCPS_TIME_WAIT;
6445
6446 /* update expire time */
6447 (*state)->expire = pf_time_second();
6448 if (src->state >= TCPS_FIN_WAIT_2 &&
6449 dst->state >= TCPS_FIN_WAIT_2)
6450 (*state)->timeout = PFTM_TCP_CLOSED;
6451 else if (src->state >= TCPS_CLOSING &&
6452 dst->state >= TCPS_CLOSING)
6453 (*state)->timeout = PFTM_TCP_FIN_WAIT;
6454 else if (src->state < TCPS_ESTABLISHED ||
6455 dst->state < TCPS_ESTABLISHED)
6456 (*state)->timeout = PFTM_TCP_OPENING;
6457 else if (src->state >= TCPS_CLOSING ||
6458 dst->state >= TCPS_CLOSING)
6459 (*state)->timeout = PFTM_TCP_CLOSING;
6460 else
6461 (*state)->timeout = PFTM_TCP_ESTABLISHED;
6462
6463 /* Fall through to PASS packet */
6464
6465 } else if ((dst->state < TCPS_SYN_SENT ||
6466 dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) &&
6467 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
6468 /* Within a window forward of the originating packet */
6469 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
6470 /* Within a window backward of the originating packet */
6471
6472 /*
6473 * This currently handles three situations:
6474 * 1) Stupid stacks will shotgun SYNs before their peer
6475 * replies.
6476 * 2) When PF catches an already established stream (the
6477 * firewall rebooted, the state table was flushed, routes
6478 * changed...)
6479 * 3) Packets get funky immediately after the connection
6480 * closes (this should catch Solaris spurious ACK|FINs
6481 * that web servers like to spew after a close)
6482 *
6483 * This must be a little more careful than the above code
6484 * since packet floods will also be caught here. We don't
6485 * update the TTL here to mitigate the damage of a packet
6486 * flood and so the same code can handle awkward establishment
6487 * and a loosened connection close.
6488 * In the establishment case, a correct peer response will
6489 * validate the connection, go through the normal state code
6490 * and keep updating the state TTL.
6491 */
6492
6493 if (pf_status.debug >= PF_DEBUG_MISC) {
6494 printf("pf: loose state match: ");
6495 pf_print_state(*state);
6496 pf_print_flags(th->th_flags);
6497 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6498 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
6499 pd->p_len, ackskew, (*state)->packets[0],
6500 (*state)->packets[1],
6501 direction == PF_IN ? "in" : "out",
6502 direction == (*state)->state_key->direction ?
6503 "fwd" : "rev");
6504 }
6505
6506 if (dst->scrub || src->scrub) {
6507 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
6508 *state, src, dst, &copyback))
6509 return (PF_DROP);
6510 #ifndef NO_APPLE_EXTENSIONS
6511 m = pd->mp;
6512 #endif
6513 }
6514
6515 /* update max window */
6516 if (src->max_win < win)
6517 src->max_win = win;
6518 /* synchronize sequencing */
6519 if (SEQ_GT(end, src->seqlo))
6520 src->seqlo = end;
6521 /* slide the window of what the other end can send */
6522 #ifndef NO_APPLE_MODIFICATIONS
6523 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6524 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
6525 #else
6526 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
6527 dst->seqhi = ack + MAX((win << sws), 1);
6528 #endif
6529
6530 /*
6531 * Cannot set dst->seqhi here since this could be a shotgunned
6532 * SYN and not an already established connection.
6533 */
6534
6535 if (th->th_flags & TH_FIN)
6536 if (src->state < TCPS_CLOSING)
6537 src->state = TCPS_CLOSING;
6538 if (th->th_flags & TH_RST)
6539 src->state = dst->state = TCPS_TIME_WAIT;
6540
6541 /* Fall through to PASS packet */
6542
6543 } else {
6544 if ((*state)->dst.state == TCPS_SYN_SENT &&
6545 (*state)->src.state == TCPS_SYN_SENT) {
6546 /* Send RST for state mismatches during handshake */
6547 if (!(th->th_flags & TH_RST))
6548 pf_send_tcp((*state)->rule.ptr, pd->af,
6549 pd->dst, pd->src, th->th_dport,
6550 th->th_sport, ntohl(th->th_ack), 0,
6551 TH_RST, 0, 0,
6552 (*state)->rule.ptr->return_ttl, 1, 0,
6553 pd->eh, kif->pfik_ifp);
6554 src->seqlo = 0;
6555 src->seqhi = 1;
6556 src->max_win = 1;
6557 } else if (pf_status.debug >= PF_DEBUG_MISC) {
6558 printf("pf: BAD state: ");
6559 pf_print_state(*state);
6560 pf_print_flags(th->th_flags);
6561 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6562 "pkts=%llu:%llu dir=%s,%s\n",
6563 seq, orig_seq, ack, pd->p_len, ackskew,
6564 (*state)->packets[0], (*state)->packets[1],
6565 direction == PF_IN ? "in" : "out",
6566 direction == (*state)->state_key->direction ?
6567 "fwd" : "rev");
6568 printf("pf: State failure on: %c %c %c %c | %c %c\n",
6569 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
6570 #ifndef NO_APPLE_MODIFICATIONS
6571 SEQ_GEQ(seq,
6572 src->seqlo - ((u_int32_t)dst->max_win << dws)) ?
6573 #else
6574 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
6575 #endif
6576 ' ': '2',
6577 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
6578 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
6579 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
6580 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
6581 }
6582 REASON_SET(reason, PFRES_BADSTATE);
6583 return (PF_DROP);
6584 }
6585
6586 /* Any packets which have gotten here are to be passed */
6587
6588 #ifndef NO_APPLE_EXTENSIONS
6589 if ((*state)->state_key->app_state &&
6590 (*state)->state_key->app_state->handler) {
6591 (*state)->state_key->app_state->handler(*state, direction,
6592 off + (th->th_off << 2), pd, kif);
6593 if (pd->lmw < 0) {
6594 REASON_SET(reason, PFRES_MEMORY);
6595 return (PF_DROP);
6596 }
6597 m = pd->mp;
6598 }
6599
6600 /* translate source/destination address, if necessary */
6601 if (STATE_TRANSLATE((*state)->state_key)) {
6602 if (direction == PF_OUT)
6603 pf_change_ap(direction, pd->mp, pd->src, &th->th_sport,
6604 pd->ip_sum, &th->th_sum,
6605 &(*state)->state_key->gwy.addr,
6606 (*state)->state_key->gwy.xport.port, 0, pd->af);
6607 else
6608 pf_change_ap(direction, pd->mp, pd->dst, &th->th_dport,
6609 pd->ip_sum, &th->th_sum,
6610 &(*state)->state_key->lan.addr,
6611 (*state)->state_key->lan.xport.port, 0, pd->af);
6612 copyback = off + sizeof (*th);
6613 }
6614
6615 if (copyback) {
6616 m = pf_lazy_makewritable(pd, m, copyback);
6617 if (!m) {
6618 REASON_SET(reason, PFRES_MEMORY);
6619 return (PF_DROP);
6620 }
6621
6622 /* Copyback sequence modulation or stateful scrub changes */
6623 m_copyback(m, off, sizeof (*th), th);
6624 }
6625 #else
6626 /* translate source/destination address, if necessary */
6627 if (STATE_TRANSLATE((*state)->state_key)) {
6628 if (direction == PF_OUT)
6629 pf_change_ap(pd->src, pd->mp, &th->th_sport, pd->ip_sum,
6630 &th->th_sum, &(*state)->state_key->gwy.addr,
6631 (*state)->state_key->gwy.port, 0, pd->af);
6632 else
6633 pf_change_ap(pd->dst, pd->mp, &th->th_dport, pd->ip_sum,
6634 &th->th_sum, &(*state)->state_key->lan.addr,
6635 (*state)->state_key->lan.port, 0, pd->af);
6636 m_copyback(m, off, sizeof (*th), th);
6637 } else if (copyback) {
6638 /* Copyback sequence modulation or stateful scrub changes */
6639 m_copyback(m, off, sizeof (*th), th);
6640 }
6641 #endif
6642
6643 return (PF_PASS);
6644 }
6645
6646 #ifndef NO_APPLE_EXTENSIONS
6647 static int
6648 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
6649 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
6650 #else
static int
6651 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
6652 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
6653 #endif
6654 {
6655 #pragma unused(h)
6656 struct pf_state_peer *src, *dst;
6657 struct pf_state_key_cmp key;
6658 struct udphdr *uh = pd->hdr.udp;
6659 #ifndef NO_APPLE_EXTENSIONS
6660 struct pf_app_state as;
6661 int dx, action, extfilter;
6662 key.app_state = 0;
6663 key.proto_variant = PF_EXTFILTER_APD;
6664 #endif
6665
6666 key.af = pd->af;
6667 key.proto = IPPROTO_UDP;
6668 if (direction == PF_IN) {
6669 PF_ACPY(&key.ext.addr, pd->src, key.af);
6670 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
6671 #ifndef NO_APPLE_EXTENSIONS
6672 key.ext.xport.port = uh->uh_sport;
6673 key.gwy.xport.port = uh->uh_dport;
6674 dx = PF_IN;
6675 #else
6676 key.ext.port = uh->uh_sport;
6677 key.gwy.port = uh->uh_dport;
6678 #endif
6679 } else {
6680 PF_ACPY(&key.lan.addr, pd->src, key.af);
6681 PF_ACPY(&key.ext.addr, pd->dst, key.af);
6682 #ifndef NO_APPLE_EXTENSIONS
6683 key.lan.xport.port = uh->uh_sport;
6684 key.ext.xport.port = uh->uh_dport;
6685 dx = PF_OUT;
6686 #else
6687 key.lan.port = uh->uh_sport;
6688 key.ext.port = uh->uh_dport;
6689 #endif
6690 }
6691
6692 #ifndef NO_APPLE_EXTENSIONS
6693 if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
6694 ntohs(uh->uh_dport) == PF_IKE_PORT) {
6695 struct pf_ike_hdr ike;
6696 size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
6697 if (plen < PF_IKE_PACKET_MINSIZE) {
6698 DPFPRINTF(PF_DEBUG_MISC,
6699 ("pf: IKE message too small.\n"));
6700 return (PF_DROP);
6701 }
6702
6703 if (plen > sizeof (ike))
6704 plen = sizeof (ike);
6705 m_copydata(m, off + sizeof (*uh), plen, &ike);
6706
6707 if (ike.initiator_cookie) {
6708 key.app_state = &as;
6709 as.compare_lan_ext = pf_ike_compare;
6710 as.compare_ext_gwy = pf_ike_compare;
6711 as.u.ike.cookie = ike.initiator_cookie;
6712 } else {
6713 /*
6714 * <http://tools.ietf.org/html/\
6715 * draft-ietf-ipsec-nat-t-ike-01>
6716 * Support non-standard NAT-T implementations that
6717 * push the ESP packet over the top of the IKE packet.
6718 * Do not drop packet.
6719 */
6720 DPFPRINTF(PF_DEBUG_MISC,
6721 ("pf: IKE initiator cookie = 0.\n"));
6722 }
6723 }
6724
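/*
 * The lookups below fall back from the most specific external
 * filtering mode to the least specific one when no match is found:
 * APD, then AD, then EI -- the names appear to follow RFC 4787's
 * address-and-port-dependent, address-dependent and endpoint-
 * independent filtering behaviours.
 */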
6725 *state = pf_find_state(kif, &key, dx);
6726
6727 if (!key.app_state && *state == 0) {
6728 key.proto_variant = PF_EXTFILTER_AD;
6729 *state = pf_find_state(kif, &key, dx);
6730 }
6731
6732 if (!key.app_state && *state == 0) {
6733 key.proto_variant = PF_EXTFILTER_EI;
6734 *state = pf_find_state(kif, &key, dx);
6735 }
6736
6737 if (pf_state_lookup_aux(state, kif, direction, &action))
6738 return (action);
6739 #else
6740 STATE_LOOKUP();
6741 #endif
6742
6743 if (direction == (*state)->state_key->direction) {
6744 src = &(*state)->src;
6745 dst = &(*state)->dst;
6746 } else {
6747 src = &(*state)->dst;
6748 dst = &(*state)->src;
6749 }
6750
6751 /* update states */
6752 if (src->state < PFUDPS_SINGLE)
6753 src->state = PFUDPS_SINGLE;
6754 if (dst->state == PFUDPS_SINGLE)
6755 dst->state = PFUDPS_MULTIPLE;
6756
6757 /* update expire time */
6758 (*state)->expire = pf_time_second();
6759 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
6760 (*state)->timeout = PFTM_UDP_MULTIPLE;
6761 else
6762 (*state)->timeout = PFTM_UDP_SINGLE;
6763
6764 #ifndef NO_APPLE_EXTENSIONS
6765 extfilter = (*state)->state_key->proto_variant;
6766 if (extfilter > PF_EXTFILTER_APD) {
6767 (*state)->state_key->ext.xport.port = key.ext.xport.port;
6768 if (extfilter > PF_EXTFILTER_AD)
6769 PF_ACPY(&(*state)->state_key->ext.addr,
6770 &key.ext.addr, key.af);
6771 }
6772
6773 if ((*state)->state_key->app_state &&
6774 (*state)->state_key->app_state->handler) {
6775 (*state)->state_key->app_state->handler(*state, direction,
6776 off + uh->uh_ulen, pd, kif);
6777 if (pd->lmw < 0) {
6778 REASON_SET(reason, PFRES_MEMORY);
6779 return (PF_DROP);
6780 }
6781 m = pd->mp;
6782 }
6783
6784 /* translate source/destination address, if necessary */
6785 if (STATE_TRANSLATE((*state)->state_key)) {
6786 m = pf_lazy_makewritable(pd, m, off + sizeof (*uh));
6787 if (!m) {
6788 REASON_SET(reason, PFRES_MEMORY);
6789 return (PF_DROP);
6790 }
6791
6792 if (direction == PF_OUT)
6793 pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
6794 pd->ip_sum, &uh->uh_sum,
6795 &(*state)->state_key->gwy.addr,
6796 (*state)->state_key->gwy.xport.port, 1, pd->af);
6797 else
6798 pf_change_ap(direction, pd->mp, pd->dst, &uh->uh_dport,
6799 pd->ip_sum, &uh->uh_sum,
6800 &(*state)->state_key->lan.addr,
6801 (*state)->state_key->lan.xport.port, 1, pd->af);
6802 m_copyback(m, off, sizeof (*uh), uh);
6803 }
6804 #else
6805 /* translate source/destination address, if necessary */
6806 if (STATE_TRANSLATE((*state)->state_key)) {
6807 if (direction == PF_OUT)
6808 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
6809 &uh->uh_sum, &(*state)->state_key->gwy.addr,
6810 (*state)->state_key->gwy.port, 1, pd->af);
6811 else
6812 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
6813 &uh->uh_sum, &(*state)->state_key->lan.addr,
6814 (*state)->state_key->lan.port, 1, pd->af);
6815 m_copyback(m, off, sizeof (*uh), uh);
6816 }
6817 #endif
6818
6819 return (PF_PASS);
6820 }
6821
6822 static int
6823 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
6824 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
6825 {
6826 #pragma unused(h)
6827 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
6828 u_int16_t icmpid = 0, *icmpsum;
6829 u_int8_t icmptype;
6830 int state_icmp = 0;
6831 struct pf_state_key_cmp key;
6832
6833 #ifndef NO_APPLE_EXTENSIONS
6834 struct pf_app_state as;
6835 key.app_state = 0;
6836 #endif
6837
6838 switch (pd->proto) {
6839 #if INET
6840 case IPPROTO_ICMP:
6841 icmptype = pd->hdr.icmp->icmp_type;
6842 icmpid = pd->hdr.icmp->icmp_id;
6843 icmpsum = &pd->hdr.icmp->icmp_cksum;
6844
6845 if (icmptype == ICMP_UNREACH ||
6846 icmptype == ICMP_SOURCEQUENCH ||
6847 icmptype == ICMP_REDIRECT ||
6848 icmptype == ICMP_TIMXCEED ||
6849 icmptype == ICMP_PARAMPROB)
6850 state_icmp++;
6851 break;
6852 #endif /* INET */
6853 #if INET6
6854 case IPPROTO_ICMPV6:
6855 icmptype = pd->hdr.icmp6->icmp6_type;
6856 icmpid = pd->hdr.icmp6->icmp6_id;
6857 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
6858
6859 if (icmptype == ICMP6_DST_UNREACH ||
6860 icmptype == ICMP6_PACKET_TOO_BIG ||
6861 icmptype == ICMP6_TIME_EXCEEDED ||
6862 icmptype == ICMP6_PARAM_PROB)
6863 state_icmp++;
6864 break;
6865 #endif /* INET6 */
6866 }
6867
6868 if (!state_icmp) {
6869
6870 /*
6871 * ICMP query/reply message not related to a TCP/UDP packet.
6872 * Search for an ICMP state.
6873 */
6874 key.af = pd->af;
6875 key.proto = pd->proto;
6876 if (direction == PF_IN) {
6877 PF_ACPY(&key.ext.addr, pd->src, key.af);
6878 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
6879 #ifndef NO_APPLE_EXTENSIONS
6880 key.ext.xport.port = 0;
6881 key.gwy.xport.port = icmpid;
6882 #else
6883 key.ext.port = 0;
6884 key.gwy.port = icmpid;
6885 #endif
6886 } else {
6887 PF_ACPY(&key.lan.addr, pd->src, key.af);
6888 PF_ACPY(&key.ext.addr, pd->dst, key.af);
6889 #ifndef NO_APPLE_EXTENSIONS
6890 key.lan.xport.port = icmpid;
6891 key.ext.xport.port = 0;
6892 #else
6893 key.lan.port = icmpid;
6894 key.ext.port = 0;
6895 #endif
6896 }
6897
6898 STATE_LOOKUP();
6899
6900 (*state)->expire = pf_time_second();
6901 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
6902
6903 /* translate source/destination address, if necessary */
6904 if (STATE_TRANSLATE((*state)->state_key)) {
6905 if (direction == PF_OUT) {
6906 switch (pd->af) {
6907 #if INET
6908 case AF_INET:
6909 pf_change_a(&saddr->v4.s_addr,
6910 pd->ip_sum,
6911 (*state)->state_key->gwy.addr.v4.s_addr, 0);
6912 #ifndef NO_APPLE_EXTENSIONS
6913 pd->hdr.icmp->icmp_cksum =
6914 pf_cksum_fixup(
6915 pd->hdr.icmp->icmp_cksum, icmpid,
6916 (*state)->state_key->gwy.xport.port, 0);
6917 pd->hdr.icmp->icmp_id =
6918 (*state)->state_key->gwy.xport.port;
6919 m = pf_lazy_makewritable(pd, m,
6920 off + ICMP_MINLEN);
6921 if (!m)
6922 return (PF_DROP);
6923 #else
6924 pd->hdr.icmp->icmp_cksum =
6925 pf_cksum_fixup(
6926 pd->hdr.icmp->icmp_cksum, icmpid,
6927 (*state)->state_key->gwy.port, 0);
6928 pd->hdr.icmp->icmp_id =
6929 (*state)->state_key->gwy.port;
6930 #endif
6931 m_copyback(m, off, ICMP_MINLEN,
6932 pd->hdr.icmp);
6933 break;
6934 #endif /* INET */
6935 #if INET6
6936 case AF_INET6:
6937 pf_change_a6(saddr,
6938 &pd->hdr.icmp6->icmp6_cksum,
6939 &(*state)->state_key->gwy.addr, 0);
6940 #ifndef NO_APPLE_EXTENSIONS
6941 m = pf_lazy_makewritable(pd, m,
6942 off + sizeof (struct icmp6_hdr));
6943 if (!m)
6944 return (PF_DROP);
6945 #endif
6946 m_copyback(m, off,
6947 sizeof (struct icmp6_hdr),
6948 pd->hdr.icmp6);
6949 break;
6950 #endif /* INET6 */
6951 }
6952 } else {
6953 switch (pd->af) {
6954 #if INET
6955 case AF_INET:
6956 pf_change_a(&daddr->v4.s_addr,
6957 pd->ip_sum,
6958 (*state)->state_key->lan.addr.v4.s_addr, 0);
6959 #ifndef NO_APPLE_EXTENSIONS
6960 pd->hdr.icmp->icmp_cksum =
6961 pf_cksum_fixup(
6962 pd->hdr.icmp->icmp_cksum, icmpid,
6963 (*state)->state_key->lan.xport.port, 0);
6964 pd->hdr.icmp->icmp_id =
6965 (*state)->state_key->lan.xport.port;
6966 m = pf_lazy_makewritable(pd, m,
6967 off + ICMP_MINLEN);
6968 if (!m)
6969 return (PF_DROP);
6970 #else
6971 pd->hdr.icmp->icmp_cksum =
6972 pf_cksum_fixup(
6973 pd->hdr.icmp->icmp_cksum, icmpid,
6974 (*state)->state_key->lan.port, 0);
6975 pd->hdr.icmp->icmp_id =
6976 (*state)->state_key->lan.port;
6977 #endif
6978 m_copyback(m, off, ICMP_MINLEN,
6979 pd->hdr.icmp);
6980 break;
6981 #endif /* INET */
6982 #if INET6
6983 case AF_INET6:
6984 pf_change_a6(daddr,
6985 &pd->hdr.icmp6->icmp6_cksum,
6986 &(*state)->state_key->lan.addr, 0);
6987 #ifndef NO_APPLE_EXTENSIONS
6988 m = pf_lazy_makewritable(pd, m,
6989 off + sizeof (struct icmp6_hdr));
6990 if (!m)
6991 return (PF_DROP);
6992 #endif
6993 m_copyback(m, off,
6994 sizeof (struct icmp6_hdr),
6995 pd->hdr.icmp6);
6996 break;
6997 #endif /* INET6 */
6998 }
6999 }
7000 }
7001
7002 return (PF_PASS);
7003
7004 } else {
7005 /*
7006 * ICMP error message in response to a TCP/UDP packet.
7007 * Extract the inner TCP/UDP header and search for that state.
7008 */
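/*
 * Note that the embedded datagram is a copy of the packet that
 * triggered the error, travelling in the opposite direction, so
 * the state-key addresses and ports below are filled in swapped
 * relative to the plain TCP/UDP lookups above.
 */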
7009
7010 struct pf_pdesc pd2;
7011 #if INET
7012 struct ip h2;
7013 #endif /* INET */
7014 #if INET6
7015 struct ip6_hdr h2_6;
7016 int terminal = 0;
7017 #endif /* INET6 */
7018 int ipoff2 = 0;
7019 int off2 = 0;
7020
7021 memset(&pd2, 0, sizeof (pd2));
7022
7023 pd2.af = pd->af;
7024 switch (pd->af) {
7025 #if INET
7026 case AF_INET:
7027 /* offset of h2 in mbuf chain */
7028 ipoff2 = off + ICMP_MINLEN;
7029
7030 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof (h2),
7031 NULL, reason, pd2.af)) {
7032 DPFPRINTF(PF_DEBUG_MISC,
7033 ("pf: ICMP error message too short "
7034 "(ip)\n"));
7035 return (PF_DROP);
7036 }
7037 /*
7038 * ICMP error messages don't refer to non-first
7039 * fragments
7040 */
7041 if (h2.ip_off & htons(IP_OFFMASK)) {
7042 REASON_SET(reason, PFRES_FRAG);
7043 return (PF_DROP);
7044 }
7045
7046 /* offset of protocol header that follows h2 */
7047 off2 = ipoff2 + (h2.ip_hl << 2);
7048
7049 pd2.proto = h2.ip_p;
7050 pd2.src = (struct pf_addr *)&h2.ip_src;
7051 pd2.dst = (struct pf_addr *)&h2.ip_dst;
7052 pd2.ip_sum = &h2.ip_sum;
7053 break;
7054 #endif /* INET */
7055 #if INET6
7056 case AF_INET6:
7057 ipoff2 = off + sizeof (struct icmp6_hdr);
7058
7059 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof (h2_6),
7060 NULL, reason, pd2.af)) {
7061 DPFPRINTF(PF_DEBUG_MISC,
7062 ("pf: ICMP error message too short "
7063 "(ip6)\n"));
7064 return (PF_DROP);
7065 }
7066 pd2.proto = h2_6.ip6_nxt;
7067 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
7068 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
7069 pd2.ip_sum = NULL;
7070 off2 = ipoff2 + sizeof (h2_6);
7071 do {
7072 switch (pd2.proto) {
7073 case IPPROTO_FRAGMENT:
7074 /*
7075 * ICMPv6 error messages for
7076 * non-first fragments
7077 */
7078 REASON_SET(reason, PFRES_FRAG);
7079 return (PF_DROP);
7080 case IPPROTO_AH:
7081 case IPPROTO_HOPOPTS:
7082 case IPPROTO_ROUTING:
7083 case IPPROTO_DSTOPTS: {
7084 /* get next header and header length */
7085 struct ip6_ext opt6;
7086
7087 if (!pf_pull_hdr(m, off2, &opt6,
7088 sizeof (opt6), NULL, reason,
7089 pd2.af)) {
7090 DPFPRINTF(PF_DEBUG_MISC,
7091 ("pf: ICMPv6 short opt\n"));
7092 return (PF_DROP);
7093 }
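/*
 * ip6e_len counts 8-octet units excluding the first
 * 8 octets for ordinary extension headers, but
 * 4-octet units minus 2 for AH, hence the two
 * formulas below.
 */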
7094 if (pd2.proto == IPPROTO_AH)
7095 off2 += (opt6.ip6e_len + 2) * 4;
7096 else
7097 off2 += (opt6.ip6e_len + 1) * 8;
7098 pd2.proto = opt6.ip6e_nxt;
7099 /* go to the next header */
7100 break;
7101 }
7102 default:
7103 terminal++;
7104 break;
7105 }
7106 } while (!terminal);
7107 break;
7108 #endif /* INET6 */
7109 }
7110
7111 switch (pd2.proto) {
7112 case IPPROTO_TCP: {
7113 struct tcphdr th;
7114 u_int32_t seq;
7115 struct pf_state_peer *src, *dst;
7116 u_int8_t dws;
7117 int copyback = 0;
7118
7119 /*
7120 * Only the first 8 bytes of the TCP header can be
7121 * expected. Don't access any TCP header fields after
7122 * th_seq; an ackskew test is not possible.
7123 */
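/*
 * (Those 8 bytes cover th_sport, th_dport and th_seq;
 * th_ack begins at offset 8 and may be missing.)
 */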
7124 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
7125 pd2.af)) {
7126 DPFPRINTF(PF_DEBUG_MISC,
7127 ("pf: ICMP error message too short "
7128 "(tcp)\n"));
7129 return (PF_DROP);
7130 }
7131
7132 key.af = pd2.af;
7133 key.proto = IPPROTO_TCP;
7134 if (direction == PF_IN) {
7135 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7136 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7137 #ifndef NO_APPLE_EXTENSIONS
7138 key.ext.xport.port = th.th_dport;
7139 key.gwy.xport.port = th.th_sport;
7140 #else
7141 key.ext.port = th.th_dport;
7142 key.gwy.port = th.th_sport;
7143 #endif
7144 } else {
7145 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7146 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7147 #ifndef NO_APPLE_EXTENSIONS
7148 key.lan.xport.port = th.th_dport;
7149 key.ext.xport.port = th.th_sport;
7150 #else
7151 key.lan.port = th.th_dport;
7152 key.ext.port = th.th_sport;
7153 #endif
7154 }
7155
7156 STATE_LOOKUP();
7157
7158 if (direction == (*state)->state_key->direction) {
7159 src = &(*state)->dst;
7160 dst = &(*state)->src;
7161 } else {
7162 src = &(*state)->src;
7163 dst = &(*state)->dst;
7164 }
7165
7166 if (src->wscale && dst->wscale)
7167 dws = dst->wscale & PF_WSCALE_MASK;
7168 else
7169 dws = 0;
7170
7171 /* Demodulate sequence number */
7172 seq = ntohl(th.th_seq) - src->seqdiff;
7173 if (src->seqdiff) {
7174 pf_change_a(&th.th_seq, icmpsum,
7175 htonl(seq), 0);
7176 copyback = 1;
7177 }
7178
7179 if (!SEQ_GEQ(src->seqhi, seq) ||
7180 #ifndef NO_APPLE_MODIFICATION
7181 !SEQ_GEQ(seq,
7182 src->seqlo - ((u_int32_t)dst->max_win << dws))) {
7183 #else
7184 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))) {
7185 #endif
7186 if (pf_status.debug >= PF_DEBUG_MISC) {
7187 printf("pf: BAD ICMP %d:%d ",
7188 icmptype, pd->hdr.icmp->icmp_code);
7189 pf_print_host(pd->src, 0, pd->af);
7190 printf(" -> ");
7191 pf_print_host(pd->dst, 0, pd->af);
7192 printf(" state: ");
7193 pf_print_state(*state);
7194 printf(" seq=%u\n", seq);
7195 }
7196 REASON_SET(reason, PFRES_BADSTATE);
7197 return (PF_DROP);
7198 }
7199
7200 if (STATE_TRANSLATE((*state)->state_key)) {
7201 if (direction == PF_IN) {
7202 pf_change_icmp(pd2.src, &th.th_sport,
7203 daddr, &(*state)->state_key->lan.addr,
7204 #ifndef NO_APPLE_EXTENSIONS
7205 (*state)->state_key->lan.xport.port, NULL,
7206 #else
7207 (*state)->state_key->lan.port, NULL,
7208 #endif
7209 pd2.ip_sum, icmpsum,
7210 pd->ip_sum, 0, pd2.af);
7211 } else {
7212 pf_change_icmp(pd2.dst, &th.th_dport,
7213 saddr, &(*state)->state_key->gwy.addr,
7214 #ifndef NO_APPLE_EXTENSIONS
7215 (*state)->state_key->gwy.xport.port, NULL,
7216 #else
7217 (*state)->state_key->gwy.port, NULL,
7218 #endif
7219 pd2.ip_sum, icmpsum,
7220 pd->ip_sum, 0, pd2.af);
7221 }
7222 copyback = 1;
7223 }
7224
7225 if (copyback) {
7226 #ifndef NO_APPLE_EXTENSIONS
7227 m = pf_lazy_makewritable(pd, m, off2 + 8);
7228 if (!m)
7229 return (PF_DROP);
7230 #endif
7231 switch (pd2.af) {
7232 #if INET
7233 case AF_INET:
7234 m_copyback(m, off, ICMP_MINLEN,
7235 pd->hdr.icmp);
7236 m_copyback(m, ipoff2, sizeof (h2),
7237 &h2);
7238 break;
7239 #endif /* INET */
7240 #if INET6
7241 case AF_INET6:
7242 m_copyback(m, off,
7243 sizeof (struct icmp6_hdr),
7244 pd->hdr.icmp6);
7245 m_copyback(m, ipoff2, sizeof (h2_6),
7246 &h2_6);
7247 break;
7248 #endif /* INET6 */
7249 }
7250 m_copyback(m, off2, 8, &th);
7251 }
7252
7253 return (PF_PASS);
7254 break;
7255 }
7256 case IPPROTO_UDP: {
7257 struct udphdr uh;
7258 #ifndef NO_APPLE_EXTENSIONS
7259 int dx, action;
7260 #endif
7261 if (!pf_pull_hdr(m, off2, &uh, sizeof (uh),
7262 NULL, reason, pd2.af)) {
7263 DPFPRINTF(PF_DEBUG_MISC,
7264 ("pf: ICMP error message too short "
7265 "(udp)\n"));
7266 return (PF_DROP);
7267 }
7268
7269 key.af = pd2.af;
7270 key.proto = IPPROTO_UDP;
7271 if (direction == PF_IN) {
7272 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7273 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7274 #ifndef NO_APPLE_EXTENSIONS
7275 key.ext.xport.port = uh.uh_dport;
7276 key.gwy.xport.port = uh.uh_sport;
7277 dx = PF_IN;
7278 #else
7279 key.ext.port = uh.uh_dport;
7280 key.gwy.port = uh.uh_sport;
7281 #endif
7282 } else {
7283 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7284 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7285 #ifndef NO_APPLE_EXTENSIONS
7286 key.lan.xport.port = uh.uh_dport;
7287 key.ext.xport.port = uh.uh_sport;
7288 dx = PF_OUT;
7289 #else
7290 key.lan.port = uh.uh_dport;
7291 key.ext.port = uh.uh_sport;
7292 #endif
7293 }
7294
7295 #ifndef NO_APPLE_EXTENSIONS
7296 key.proto_variant = PF_EXTFILTER_APD;
7297
7298 if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
7299 ntohs(uh.uh_dport) == PF_IKE_PORT) {
7300 struct pf_ike_hdr ike;
7301 size_t plen =
7302 m->m_pkthdr.len - off2 - sizeof (uh);
7303 if (direction == PF_IN &&
7304 plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
7305 DPFPRINTF(PF_DEBUG_MISC, ("pf: "
7306 "ICMP error, embedded IKE message "
7307 "too small.\n"));
7308 return (PF_DROP);
7309 }
7310
7311 if (plen > sizeof (ike))
7312 plen = sizeof (ike);
7313 m_copydata(m, off2 + sizeof (uh), plen, &ike);
7314
7315 key.app_state = &as;
7316 as.compare_lan_ext = pf_ike_compare;
7317 as.compare_ext_gwy = pf_ike_compare;
7318 as.u.ike.cookie = ike.initiator_cookie;
7319 }
7320
7321 *state = pf_find_state(kif, &key, dx);
7322
7323 if (key.app_state && *state == 0) {
7324 key.app_state = 0;
7325 *state = pf_find_state(kif, &key, dx);
7326 }
7327
7328 if (*state == 0) {
7329 key.proto_variant = PF_EXTFILTER_AD;
7330 *state = pf_find_state(kif, &key, dx);
7331 }
7332
7333 if (*state == 0) {
7334 key.proto_variant = PF_EXTFILTER_EI;
7335 *state = pf_find_state(kif, &key, dx);
7336 }
7337
7338 if (pf_state_lookup_aux(state, kif, direction, &action))
7339 return (action);
7340 #else
7341 STATE_LOOKUP();
7342 #endif
7343
7344 if (STATE_TRANSLATE((*state)->state_key)) {
7345 if (direction == PF_IN) {
7346 pf_change_icmp(pd2.src, &uh.uh_sport,
7347 daddr, &(*state)->state_key->lan.addr,
7348 #ifndef NO_APPLE_EXTENSIONS
7349 (*state)->state_key->lan.xport.port, &uh.uh_sum,
7350 #else
7351 (*state)->state_key->lan.port, &uh.uh_sum,
7352 #endif
7353 pd2.ip_sum, icmpsum,
7354 pd->ip_sum, 1, pd2.af);
7355 } else {
7356 pf_change_icmp(pd2.dst, &uh.uh_dport,
7357 saddr, &(*state)->state_key->gwy.addr,
7358 #ifndef NO_APPLE_EXTENSIONS
7359 (*state)->state_key->gwy.xport.port, &uh.uh_sum,
7360 #else
7361 (*state)->state_key->gwy.port, &uh.uh_sum,
7362 #endif
7363 pd2.ip_sum, icmpsum,
7364 pd->ip_sum, 1, pd2.af);
7365 }
7366 #ifndef NO_APPLE_EXTENSIONS
7367 m = pf_lazy_makewritable(pd, m,
7368 off2 + sizeof (uh));
7369 if (!m)
7370 return (PF_DROP);
7371 #endif
7372 switch (pd2.af) {
7373 #if INET
7374 case AF_INET:
7375 m_copyback(m, off, ICMP_MINLEN,
7376 pd->hdr.icmp);
7377 m_copyback(m, ipoff2, sizeof (h2), &h2);
7378 break;
7379 #endif /* INET */
7380 #if INET6
7381 case AF_INET6:
7382 m_copyback(m, off,
7383 sizeof (struct icmp6_hdr),
7384 pd->hdr.icmp6);
7385 m_copyback(m, ipoff2, sizeof (h2_6),
7386 &h2_6);
7387 break;
7388 #endif /* INET6 */
7389 }
7390 m_copyback(m, off2, sizeof (uh), &uh);
7391 }
7392
7393 return (PF_PASS);
7394 break;
7395 }
7396 #if INET
7397 case IPPROTO_ICMP: {
7398 struct icmp iih;
7399
7400 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
7401 NULL, reason, pd2.af)) {
7402 DPFPRINTF(PF_DEBUG_MISC,
7403 ("pf: ICMP error message too short i"
7404 "(icmp)\n"));
7405 return (PF_DROP);
7406 }
7407
7408 key.af = pd2.af;
7409 key.proto = IPPROTO_ICMP;
7410 if (direction == PF_IN) {
7411 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7412 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7413 #ifndef NO_APPLE_EXTENSIONS
7414 key.ext.xport.port = 0;
7415 key.gwy.xport.port = iih.icmp_id;
7416 #else
7417 key.ext.port = 0;
7418 key.gwy.port = iih.icmp_id;
7419 #endif
7420 } else {
7421 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7422 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7423 #ifndef NO_APPLE_EXTENSIONS
7424 key.lan.xport.port = iih.icmp_id;
7425 key.ext.xport.port = 0;
7426 #else
7427 key.lan.port = iih.icmp_id;
7428 key.ext.port = 0;
7429 #endif
7430 }
7431
7432 STATE_LOOKUP();
7433
7434 if (STATE_TRANSLATE((*state)->state_key)) {
7435 if (direction == PF_IN) {
7436 pf_change_icmp(pd2.src, &iih.icmp_id,
7437 daddr, &(*state)->state_key->lan.addr,
7438 #ifndef NO_APPLE_EXTENSIONS
7439 (*state)->state_key->lan.xport.port, NULL,
7440 #else
7441 (*state)->state_key->lan.port, NULL,
7442 #endif
7443 pd2.ip_sum, icmpsum,
7444 pd->ip_sum, 0, AF_INET);
7445 } else {
7446 pf_change_icmp(pd2.dst, &iih.icmp_id,
7447 saddr, &(*state)->state_key->gwy.addr,
7448 #ifndef NO_APPLE_EXTENSIONS
7449 (*state)->state_key->gwy.xport.port, NULL,
7450 #else
7451 (*state)->state_key->gwy.port, NULL,
7452 #endif
7453 pd2.ip_sum, icmpsum,
7454 pd->ip_sum, 0, AF_INET);
7455 }
7456 #ifndef NO_APPLE_EXTENSIONS
7457 m = pf_lazy_makewritable(pd, m, off2 + ICMP_MINLEN);
7458 if (!m)
7459 return (PF_DROP);
7460 #endif
7461 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
7462 m_copyback(m, ipoff2, sizeof (h2), &h2);
7463 m_copyback(m, off2, ICMP_MINLEN, &iih);
7464 }
7465
7466 return (PF_PASS);
7467 break;
7468 }
7469 #endif /* INET */
7470 #if INET6
7471 case IPPROTO_ICMPV6: {
7472 struct icmp6_hdr iih;
7473
7474 if (!pf_pull_hdr(m, off2, &iih,
7475 sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
7476 DPFPRINTF(PF_DEBUG_MISC,
7477 ("pf: ICMP error message too short "
7478 "(icmp6)\n"));
7479 return (PF_DROP);
7480 }
7481
7482 key.af = pd2.af;
7483 key.proto = IPPROTO_ICMPV6;
7484 if (direction == PF_IN) {
7485 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7486 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7487 #ifndef NO_APPLE_EXTENSIONS
7488 key.ext.xport.port = 0;
7489 key.gwy.xport.port = iih.icmp6_id;
7490 #else
7491 key.ext.port = 0;
7492 key.gwy.port = iih.icmp6_id;
7493 #endif
7494 } else {
7495 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7496 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7497 #ifndef NO_APPLE_EXTENSIONS
7498 key.lan.xport.port = iih.icmp6_id;
7499 key.ext.xport.port = 0;
7500 #else
7501 key.lan.port = iih.icmp6_id;
7502 key.ext.port = 0;
7503 #endif
7504 }
7505
7506 STATE_LOOKUP();
7507
7508 if (STATE_TRANSLATE((*state)->state_key)) {
7509 if (direction == PF_IN) {
7510 pf_change_icmp(pd2.src, &iih.icmp6_id,
7511 daddr, &(*state)->state_key->lan.addr,
7512 #ifndef NO_APPLE_EXTENSIONS
7513 (*state)->state_key->lan.xport.port, NULL,
7514 #else
7515 (*state)->state_key->lan.port, NULL,
7516 #endif
7517 pd2.ip_sum, icmpsum,
7518 pd->ip_sum, 0, AF_INET6);
7519 } else {
7520 pf_change_icmp(pd2.dst, &iih.icmp6_id,
7521 saddr, &(*state)->state_key->gwy.addr,
7522 #ifndef NO_APPLE_EXTENSIONS
7523 (*state)->state_key->gwy.xport.port, NULL,
7524 #else
7525 (*state)->state_key->gwy.port, NULL,
7526 #endif
7527 pd2.ip_sum, icmpsum,
7528 pd->ip_sum, 0, AF_INET6);
7529 }
7530 #ifndef NO_APPLE_EXTENSIONS
7531 m = pf_lazy_makewritable(pd, m, off2 +
7532 sizeof (struct icmp6_hdr));
7533 if (!m)
7534 return (PF_DROP);
7535 #endif
7536 m_copyback(m, off, sizeof (struct icmp6_hdr),
7537 pd->hdr.icmp6);
7538 m_copyback(m, ipoff2, sizeof (h2_6), &h2_6);
7539 m_copyback(m, off2, sizeof (struct icmp6_hdr),
7540 &iih);
7541 }
7542
7543 return (PF_PASS);
7544 break;
7545 }
7546 #endif /* INET6 */
7547 default: {
7548 key.af = pd2.af;
7549 key.proto = pd2.proto;
7550 if (direction == PF_IN) {
7551 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7552 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
7553 #ifndef NO_APPLE_EXTENSIONS
7554 key.ext.xport.port = 0;
7555 key.gwy.xport.port = 0;
7556 #else
7557 key.ext.port = 0;
7558 key.gwy.port = 0;
7559 #endif
7560 } else {
7561 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7562 PF_ACPY(&key.ext.addr, pd2.src, key.af);
7563 #ifndef NO_APPLE_EXTENSIONS
7564 key.lan.xport.port = 0;
7565 key.ext.xport.port = 0;
7566 #else
7567 key.lan.port = 0;
7568 key.ext.port = 0;
7569 #endif
7570 }
7571
7572 STATE_LOOKUP();
7573
7574 if (STATE_TRANSLATE((*state)->state_key)) {
7575 if (direction == PF_IN) {
7576 pf_change_icmp(pd2.src, NULL,
7577 daddr, &(*state)->state_key->lan.addr,
7578 0, NULL,
7579 pd2.ip_sum, icmpsum,
7580 pd->ip_sum, 0, pd2.af);
7581 } else {
7582 pf_change_icmp(pd2.dst, NULL,
7583 saddr, &(*state)->state_key->gwy.addr,
7584 0, NULL,
7585 pd2.ip_sum, icmpsum,
7586 pd->ip_sum, 0, pd2.af);
7587 }
7588 switch (pd2.af) {
7589 #if INET
7590 case AF_INET:
7591 #ifndef NO_APPLE_EXTENSIONS
7592 m = pf_lazy_makewritable(pd, m,
7593 ipoff2 + sizeof (h2));
7594 if (!m)
7595 return (PF_DROP);
7596 #endif
7597 m_copyback(m, off, ICMP_MINLEN,
7598 pd->hdr.icmp);
7599 m_copyback(m, ipoff2, sizeof (h2), &h2);
7600 break;
7601 #endif /* INET */
7602 #if INET6
7603 case AF_INET6:
7604 #ifndef NO_APPLE_EXTENSIONS
7605 m = pf_lazy_makewritable(pd, m,
7606 ipoff2 + sizeof (h2_6));
7607 if (!m)
7608 return (PF_DROP);
7609 #endif
7610 m_copyback(m, off,
7611 sizeof (struct icmp6_hdr),
7612 pd->hdr.icmp6);
7613 m_copyback(m, ipoff2, sizeof (h2_6),
7614 &h2_6);
7615 break;
7616 #endif /* INET6 */
7617 }
7618 }
7619
7620 return (PF_PASS);
7621 break;
7622 }
7623 }
7624 }
7625 }
7626
7627 #ifndef NO_APPLE_EXTENSIONS
7628 static int
7629 pf_test_state_grev1(struct pf_state **state, int direction,
7630 struct pfi_kif *kif, int off, struct pf_pdesc *pd)
7631 {
7632 struct pf_state_peer *src;
7633 struct pf_state_peer *dst;
7634 struct pf_state_key_cmp key;
7635 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
7636 struct mbuf *m;
7637
7638 key.app_state = 0;
7639 key.af = pd->af;
7640 key.proto = IPPROTO_GRE;
7641 key.proto_variant = PF_GRE_PPTP_VARIANT;
7642 if (direction == PF_IN) {
7643 PF_ACPY(&key.ext.addr, pd->src, key.af);
7644 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7645 key.gwy.xport.call_id = grev1->call_id;
7646 } else {
7647 PF_ACPY(&key.lan.addr, pd->src, key.af);
7648 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7649 key.ext.xport.call_id = grev1->call_id;
7650 }
7651
7652 STATE_LOOKUP();
7653
7654 if (direction == (*state)->state_key->direction) {
7655 src = &(*state)->src;
7656 dst = &(*state)->dst;
7657 } else {
7658 src = &(*state)->dst;
7659 dst = &(*state)->src;
7660 }
7661
7662 /* update states */
7663 if (src->state < PFGRE1S_INITIATING)
7664 src->state = PFGRE1S_INITIATING;
7665
7666 /* update expire time */
7667 (*state)->expire = pf_time_second();
7668 if (src->state >= PFGRE1S_INITIATING &&
7669 dst->state >= PFGRE1S_INITIATING) {
7670 if ((*state)->timeout != PFTM_TCP_ESTABLISHED)
7671 (*state)->timeout = PFTM_GREv1_ESTABLISHED;
7672 src->state = PFGRE1S_ESTABLISHED;
7673 dst->state = PFGRE1S_ESTABLISHED;
7674 } else {
7675 (*state)->timeout = PFTM_GREv1_INITIATING;
7676 }
7677
7678 if ((*state)->state_key->app_state)
7679 (*state)->state_key->app_state->u.grev1.pptp_state->expire =
7680 pf_time_second();
7681
7682 /* translate source/destination address, if necessary */
7683 if (STATE_GRE_TRANSLATE((*state)->state_key)) {
7684 if (direction == PF_OUT) {
7685 switch (pd->af) {
7686 #if INET
7687 case AF_INET:
7688 pf_change_a(&pd->src->v4.s_addr,
7689 pd->ip_sum,
7690 (*state)->state_key->gwy.addr.v4.s_addr, 0);
7691 break;
7692 #endif /* INET */
7693 #if INET6
7694 case AF_INET6:
7695 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
7696 pd->af);
7697 break;
7698 #endif /* INET6 */
7699 }
7700 } else {
7701 grev1->call_id = (*state)->state_key->lan.xport.call_id;
7702
7703 switch (pd->af) {
7704 #if INET
7705 case AF_INET:
7706 pf_change_a(&pd->dst->v4.s_addr,
7707 pd->ip_sum,
7708 (*state)->state_key->lan.addr.v4.s_addr, 0);
7709 break;
7710 #endif /* INET */
7711 #if INET6
7712 case AF_INET6:
7713 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
7714 pd->af);
7715 break;
7716 #endif /* INET6 */
7717 }
7718 }
7719
7720 m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1));
7721 if (!m)
7722 return (PF_DROP);
7723 m_copyback(m, off, sizeof (*grev1), grev1);
7724 }
7725
7726 return (PF_PASS);
7727 }
7728
7729 int
7730 pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
7731 int off, struct pf_pdesc *pd)
7732 {
7733 #pragma unused(off)
7734 struct pf_state_peer *src;
7735 struct pf_state_peer *dst;
7736 struct pf_state_key_cmp key;
7737 struct pf_esp_hdr *esp = pd->hdr.esp;
7738 int action;
7739
7740 memset(&key, 0, sizeof (key));
7741 key.af = pd->af;
7742 key.proto = IPPROTO_ESP;
7743 if (direction == PF_IN) {
7744 PF_ACPY(&key.ext.addr, pd->src, key.af);
7745 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7746 key.gwy.xport.spi = esp->spi;
7747 } else {
7748 PF_ACPY(&key.lan.addr, pd->src, key.af);
7749 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7750 key.ext.xport.spi = esp->spi;
7751 }
7752
7753 *state = pf_find_state(kif, &key, direction);
7754
7755 if (*state == 0) {
7756 struct pf_state *s;
7757
7758 /*
7759 * <jhw@apple.com>
7760 * No matching state. Look for a blocking state. If we find
7761 * one, then use that state and move it so that it's keyed to
7762 * the SPI in the current packet.
7763 */
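/*
 * Concretely: such a blocking state is found by repeating the
 * lookup with an SPI of zero, re-keyed to this packet's SPI and
 * re-inserted into its state tree; if the re-insert collides
 * with an existing entry, the state is torn down and the packet
 * dropped further below.
 */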
7764 if (direction == PF_IN) {
7765 key.gwy.xport.spi = 0;
7766
7767 s = pf_find_state(kif, &key, direction);
7768 if (s) {
7769 struct pf_state_key *sk = s->state_key;
7770
7771 RB_REMOVE(pf_state_tree_ext_gwy,
7772 &pf_statetbl_ext_gwy, sk);
7773 sk->lan.xport.spi = sk->gwy.xport.spi =
7774 esp->spi;
7775
7776 if (RB_INSERT(pf_state_tree_ext_gwy,
7777 &pf_statetbl_ext_gwy, sk))
7778 pf_detach_state(s, PF_DT_SKIP_EXTGWY);
7779 else
7780 *state = s;
7781 }
7782 } else {
7783 key.ext.xport.spi = 0;
7784
7785 s = pf_find_state(kif, &key, direction);
7786 if (s) {
7787 struct pf_state_key *sk = s->state_key;
7788
7789 RB_REMOVE(pf_state_tree_lan_ext,
7790 &pf_statetbl_lan_ext, sk);
7791 sk->ext.xport.spi = esp->spi;
7792
7793 if (RB_INSERT(pf_state_tree_lan_ext,
7794 &pf_statetbl_lan_ext, sk))
7795 pf_detach_state(s, PF_DT_SKIP_LANEXT);
7796 else
7797 *state = s;
7798 }
7799 }
7800
7801 if (s) {
7802 if (*state == 0) {
7803 #if NPFSYNC
7804 if (s->creatorid == pf_status.hostid)
7805 pfsync_delete_state(s);
7806 #endif
7807 s->timeout = PFTM_UNLINKED;
7808 hook_runloop(&s->unlink_hooks,
7809 HOOK_REMOVE|HOOK_FREE);
7810 pf_src_tree_remove_state(s);
7811 pf_free_state(s);
7812 return (PF_DROP);
7813 }
7814 }
7815 }
7816
7817 if (pf_state_lookup_aux(state, kif, direction, &action))
7818 return (action);
7819
7820 if (direction == (*state)->state_key->direction) {
7821 src = &(*state)->src;
7822 dst = &(*state)->dst;
7823 } else {
7824 src = &(*state)->dst;
7825 dst = &(*state)->src;
7826 }
7827
7828 /* update states */
7829 if (src->state < PFESPS_INITIATING)
7830 src->state = PFESPS_INITIATING;
7831
7832 /* update expire time */
7833 (*state)->expire = pf_time_second();
7834 if (src->state >= PFESPS_INITIATING &&
7835 dst->state >= PFESPS_INITIATING) {
7836 (*state)->timeout = PFTM_ESP_ESTABLISHED;
7837 src->state = PFESPS_ESTABLISHED;
7838 dst->state = PFESPS_ESTABLISHED;
7839 } else {
7840 (*state)->timeout = PFTM_ESP_INITIATING;
7841 }
7842 /* translate source/destination address, if necessary */
7843 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
7844 if (direction == PF_OUT) {
7845 switch (pd->af) {
7846 #if INET
7847 case AF_INET:
7848 pf_change_a(&pd->src->v4.s_addr,
7849 pd->ip_sum,
7850 (*state)->state_key->gwy.addr.v4.s_addr, 0);
7851 break;
7852 #endif /* INET */
7853 #if INET6
7854 case AF_INET6:
7855 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
7856 pd->af);
7857 break;
7858 #endif /* INET6 */
7859 }
7860 } else {
7861 switch (pd->af) {
7862 #if INET
7863 case AF_INET:
7864 pf_change_a(&pd->dst->v4.s_addr,
7865 pd->ip_sum,
7866 (*state)->state_key->lan.addr.v4.s_addr, 0);
7867 break;
7868 #endif /* INET */
7869 #if INET6
7870 case AF_INET6:
7871 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
7872 pd->af);
7873 break;
7874 #endif /* INET6 */
7875 }
7876 }
7877 }
7878
7879 return (PF_PASS);
7880 }
7881 #endif
7882
7883 static int
7884 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
7885 struct pf_pdesc *pd)
7886 {
7887 struct pf_state_peer *src, *dst;
7888 struct pf_state_key_cmp key;
7889
7890 #ifndef NO_APPLE_EXTENSIONS
7891 key.app_state = 0;
7892 #endif
7893 key.af = pd->af;
7894 key.proto = pd->proto;
7895 if (direction == PF_IN) {
7896 PF_ACPY(&key.ext.addr, pd->src, key.af);
7897 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7898 #ifndef NO_APPLE_EXTENSIONS
7899 key.ext.xport.port = 0;
7900 key.gwy.xport.port = 0;
7901 #else
7902 key.ext.port = 0;
7903 key.gwy.port = 0;
7904 #endif
7905 } else {
7906 PF_ACPY(&key.lan.addr, pd->src, key.af);
7907 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7908 #ifndef NO_APPLE_EXTENSIONS
7909 key.lan.xport.port = 0;
7910 key.ext.xport.port = 0;
7911 #else
7912 key.lan.port = 0;
7913 key.ext.port = 0;
7914 #endif
7915 }
7916
7917 STATE_LOOKUP();
7918
7919 if (direction == (*state)->state_key->direction) {
7920 src = &(*state)->src;
7921 dst = &(*state)->dst;
7922 } else {
7923 src = &(*state)->dst;
7924 dst = &(*state)->src;
7925 }
7926
7927 /* update states */
7928 if (src->state < PFOTHERS_SINGLE)
7929 src->state = PFOTHERS_SINGLE;
7930 if (dst->state == PFOTHERS_SINGLE)
7931 dst->state = PFOTHERS_MULTIPLE;
7932
7933 /* update expire time */
7934 (*state)->expire = pf_time_second();
7935 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
7936 (*state)->timeout = PFTM_OTHER_MULTIPLE;
7937 else
7938 (*state)->timeout = PFTM_OTHER_SINGLE;
7939
7940 /* translate source/destination address, if necessary */
7941 #ifndef NO_APPLE_EXTENSIONS
7942 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
7943 #else
7944 if (STATE_TRANSLATE((*state)->state_key)) {
7945 #endif
7946 if (direction == PF_OUT) {
7947 switch (pd->af) {
7948 #if INET
7949 case AF_INET:
7950 pf_change_a(&pd->src->v4.s_addr,
7951 pd->ip_sum,
7952 (*state)->state_key->gwy.addr.v4.s_addr,
7953 0);
7954 break;
7955 #endif /* INET */
7956 #if INET6
7957 case AF_INET6:
7958 PF_ACPY(pd->src,
7959 &(*state)->state_key->gwy.addr, pd->af);
7960 break;
7961 #endif /* INET6 */
7962 }
7963 } else {
7964 switch (pd->af) {
7965 #if INET
7966 case AF_INET:
7967 pf_change_a(&pd->dst->v4.s_addr,
7968 pd->ip_sum,
7969 (*state)->state_key->lan.addr.v4.s_addr,
7970 0);
7971 break;
7972 #endif /* INET */
7973 #if INET6
7974 case AF_INET6:
7975 PF_ACPY(pd->dst,
7976 &(*state)->state_key->lan.addr, pd->af);
7977 break;
7978 #endif /* INET6 */
7979 }
7980 }
7981 }
7982
7983 return (PF_PASS);
7984 }
7985
7986 /*
7987 * ipoff and off are measured from the start of the mbuf chain.
7988 * h must be at "ipoff" on the mbuf chain.
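 *
 * Typical usage (illustrative), as in the ICMP error handling above:
 *
 *	struct tcphdr th;
 *	if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, pd2.af))
 *		return (PF_DROP);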
7989 */
7990 void *
7991 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
7992 u_short *actionp, u_short *reasonp, sa_family_t af)
7993 {
7994 switch (af) {
7995 #if INET
7996 case AF_INET: {
7997 struct ip *h = mtod(m, struct ip *);
7998 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
7999
8000 if (fragoff) {
8001 if (fragoff >= len) {
8002 ACTION_SET(actionp, PF_PASS);
8003 } else {
8004 ACTION_SET(actionp, PF_DROP);
8005 REASON_SET(reasonp, PFRES_FRAG);
8006 }
8007 return (NULL);
8008 }
8009 if (m->m_pkthdr.len < off + len ||
8010 ntohs(h->ip_len) < off + len) {
8011 ACTION_SET(actionp, PF_DROP);
8012 REASON_SET(reasonp, PFRES_SHORT);
8013 return (NULL);
8014 }
8015 break;
8016 }
8017 #endif /* INET */
8018 #if INET6
8019 case AF_INET6: {
8020 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
8021
8022 if (m->m_pkthdr.len < off + len ||
8023 (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
8024 (unsigned)(off + len)) {
8025 ACTION_SET(actionp, PF_DROP);
8026 REASON_SET(reasonp, PFRES_SHORT);
8027 return (NULL);
8028 }
8029 break;
8030 }
8031 #endif /* INET6 */
8032 }
8033 m_copydata(m, off, len, p);
8034 return (p);
8035 }
8036
8037 int
8038 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
8039 {
8040 #pragma unused(kif)
8041 struct sockaddr_in *dst;
8042 int ret = 1;
8043 #if INET6
8044 struct sockaddr_in6 *dst6;
8045 struct route_in6 ro;
8046 #else
8047 struct route ro;
8048 #endif
8049
8050 bzero(&ro, sizeof (ro));
8051 switch (af) {
8052 case AF_INET:
8053 dst = satosin(&ro.ro_dst);
8054 dst->sin_family = AF_INET;
8055 dst->sin_len = sizeof (*dst);
8056 dst->sin_addr = addr->v4;
8057 break;
8058 #if INET6
8059 case AF_INET6:
8060 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
8061 dst6->sin6_family = AF_INET6;
8062 dst6->sin6_len = sizeof (*dst6);
8063 dst6->sin6_addr = addr->v6;
8064 break;
8065 #endif /* INET6 */
8066 default:
8067 return (0);
8068 }
8069
8070 /* XXX: IFT_ENC is not currently used by anything */
8071 /* Skip checks for ipsec interfaces */
8072 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
8073 goto out;
8074
8075 rtalloc((struct route *)&ro);
8076
8077 out:
8078 if (ro.ro_rt != NULL)
8079 RTFREE(ro.ro_rt);
8080 return (ret);
8081 }
8082
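/*
 * Route labels are not supported in this port: the address wrap is unused
 * and "ret" is never set, so this lookup can never produce a match.
 */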
8083 int
8084 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
8085 {
8086 #pragma unused(aw)
8087 struct sockaddr_in *dst;
8088 #if INET6
8089 struct sockaddr_in6 *dst6;
8090 struct route_in6 ro;
8091 #else
8092 struct route ro;
8093 #endif
8094 int ret = 0;
8095
8096 bzero(&ro, sizeof (ro));
8097 switch (af) {
8098 case AF_INET:
8099 dst = satosin(&ro.ro_dst);
8100 dst->sin_family = AF_INET;
8101 dst->sin_len = sizeof (*dst);
8102 dst->sin_addr = addr->v4;
8103 break;
8104 #if INET6
8105 case AF_INET6:
8106 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
8107 dst6->sin6_family = AF_INET6;
8108 dst6->sin6_len = sizeof (*dst6);
8109 dst6->sin6_addr = addr->v6;
8110 break;
8111 #endif /* INET6 */
8112 default:
8113 return (0);
8114 }
8115
8116 rtalloc((struct route *)&ro);
8117
8118 if (ro.ro_rt != NULL) {
8119 RTFREE(ro.ro_rt);
8120 }
8121
8122 return (ret);
8123 }
8124
8125 #if INET
8126 static void
8127 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
8128 struct pf_state *s, struct pf_pdesc *pd)
8129 {
8130 #pragma unused(pd)
8131 struct mbuf *m0, *m1;
8132 struct route iproute;
8133 struct route *ro = NULL;
8134 struct sockaddr_in *dst;
8135 struct ip *ip;
8136 struct ifnet *ifp = NULL;
8137 struct pf_addr naddr;
8138 struct pf_src_node *sn = NULL;
8139 int error = 0;
8140 int sw_csum = 0;
8141
8142 if (m == NULL || *m == NULL || r == NULL ||
8143 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
8144 panic("pf_route: invalid parameters");
8145
8146 if (pd->pf_mtag->routed++ > 3) {
8147 m0 = *m;
8148 *m = NULL;
8149 goto bad;
8150 }
8151
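/*
 * dup-to routes a copy of the packet and leaves the original alone;
 * route-to only applies to packets travelling in the rule's direction,
 * reply-to only to packets travelling against it, hence the equality
 * test below.
 */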
8152 if (r->rt == PF_DUPTO) {
8153 if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
8154 return;
8155 } else {
8156 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
8157 return;
8158 m0 = *m;
8159 }
8160
8161 if (m0->m_len < (int)sizeof (struct ip)) {
8162 DPFPRINTF(PF_DEBUG_URGENT,
8163 ("pf_route: m0->m_len < sizeof (struct ip)\n"));
8164 goto bad;
8165 }
8166
8167 ip = mtod(m0, struct ip *);
8168
8169 ro = &iproute;
8170 bzero((caddr_t)ro, sizeof (*ro));
8171 dst = satosin(&ro->ro_dst);
8172 dst->sin_family = AF_INET;
8173 dst->sin_len = sizeof (*dst);
8174 dst->sin_addr = ip->ip_dst;
8175
8176 if (r->rt == PF_FASTROUTE) {
8177 rtalloc(ro);
8178 if (ro->ro_rt == 0) {
8179 ipstat.ips_noroute++;
8180 goto bad;
8181 }
8182
8183 ifp = ro->ro_rt->rt_ifp;
8184 RT_LOCK(ro->ro_rt);
8185 ro->ro_rt->rt_use++;
8186
8187 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
8188 dst = satosin(ro->ro_rt->rt_gateway);
8189 RT_UNLOCK(ro->ro_rt);
8190 } else {
8191 if (TAILQ_EMPTY(&r->rpool.list)) {
8192 DPFPRINTF(PF_DEBUG_URGENT,
8193 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
8194 goto bad;
8195 }
8196 if (s == NULL) {
8197 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
8198 &naddr, NULL, &sn);
8199 if (!PF_AZERO(&naddr, AF_INET))
8200 dst->sin_addr.s_addr = naddr.v4.s_addr;
8201 ifp = r->rpool.cur->kif ?
8202 r->rpool.cur->kif->pfik_ifp : NULL;
8203 } else {
8204 if (!PF_AZERO(&s->rt_addr, AF_INET))
8205 dst->sin_addr.s_addr =
8206 s->rt_addr.v4.s_addr;
8207 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
8208 }
8209 }
8210 if (ifp == NULL)
8211 goto bad;
8212
8213 if (oifp != ifp) {
8214 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
8215 goto bad;
8216 else if (m0 == NULL)
8217 goto done;
8218 if (m0->m_len < (int)sizeof (struct ip)) {
8219 DPFPRINTF(PF_DEBUG_URGENT,
8220 ("pf_route: m0->m_len < sizeof (struct ip)\n"));
8221 goto bad;
8222 }
8223 ip = mtod(m0, struct ip *);
8224 }
8225
8226 /* Copied from ip_output. */
8227
8228 /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
8229 m0->m_pkthdr.csum_flags |= CSUM_IP;
8230 sw_csum = m0->m_pkthdr.csum_flags &
8231 ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
8232
8233 if (ifp->if_hwassist & CSUM_TCP_SUM16) {
8234 /*
8235 * Special case code for GMACE
8236 * frames that can be checksummed by GMACE SUM16 HW:
8237 * frame >64, no fragments, no UDP
8238 */
8239 if (apple_hwcksum_tx && (m0->m_pkthdr.csum_flags & CSUM_TCP) &&
8240 (ntohs(ip->ip_len) > 50) &&
8241 (ntohs(ip->ip_len) <= ifp->if_mtu)) {
8242 /*
8243 * Apple GMAC HW expects:
8244 * STUFF_OFFSET << 16 | START_OFFSET
8245 */
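/*
 * Illustrative encoding (values assumed for the example, not taken from
 * the source): with a 20-byte IP header the start offset below is
 * 20 + 14 = 34; if the stack left the TCP checksum field offset (16) in
 * csum_data, the result is ((16 + 34) << 16) | 34, i.e. stuff offset 50
 * in the upper half and start offset 34 in the lower half.
 */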
8246 /* IP+Enet header length */
8247 u_short offset = ((ip->ip_hl) << 2) + 14;
8248 u_short csumprev = m0->m_pkthdr.csum_data & 0xffff;
8249 m0->m_pkthdr.csum_flags = CSUM_DATA_VALID |
8250 CSUM_TCP_SUM16; /* for GMAC */
8251 m0->m_pkthdr.csum_data = (csumprev + offset) << 16;
8252 m0->m_pkthdr.csum_data += offset;
8253 /* do IP hdr chksum in software */
8254 sw_csum = CSUM_DELAY_IP;
8255 } else {
8256 /* let the software handle any UDP or TCP checksums */
8257 sw_csum |= (CSUM_DELAY_DATA & m0->m_pkthdr.csum_flags);
8258 }
8259 } else if (apple_hwcksum_tx == 0) {
8260 sw_csum |= (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
8261 m0->m_pkthdr.csum_flags;
8262 }
8263
8264 if (sw_csum & CSUM_DELAY_DATA) {
8265 in_delayed_cksum(m0);
8266 sw_csum &= ~CSUM_DELAY_DATA;
8267 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
8268 }
8269
8270 if (apple_hwcksum_tx != 0) {
8271 m0->m_pkthdr.csum_flags &=
8272 IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
8273 } else {
8274 m0->m_pkthdr.csum_flags = 0;
8275 }
8276
8277 if (ntohs(ip->ip_len) <= ifp->if_mtu ||
8278 (ifp->if_hwassist & CSUM_FRAGMENT)) {
8279 ip->ip_sum = 0;
8280 if (sw_csum & CSUM_DELAY_IP)
8281 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
8282 error = ifnet_output(ifp, PF_INET, m0, ro, sintosa(dst));
8283 goto done;
8284 }
8285
8286 /*
8287 * Too large for interface; fragment if possible.
8288 * Must be able to put at least 8 bytes per fragment.
8289 */
8290 if (ip->ip_off & htons(IP_DF)) {
8291 ipstat.ips_cantfrag++;
8292 if (r->rt != PF_DUPTO) {
8293 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
8294 ifp->if_mtu);
8295 goto done;
8296 } else
8297 goto bad;
8298 }
8299
8300 m1 = m0;
8301
8302 /* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
8303 #if BYTE_ORDER != BIG_ENDIAN
8304 NTOHS(ip->ip_off);
8305 NTOHS(ip->ip_len);
8306 #endif
8307 error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);
8308
8309 if (error) {
8310 m0 = NULL;
8311 goto bad;
8312 }
8313
8314 for (m0 = m1; m0; m0 = m1) {
8315 m1 = m0->m_nextpkt;
8316 m0->m_nextpkt = 0;
8317 if (error == 0)
8318 error = ifnet_output(ifp, PF_INET, m0, ro,
8319 sintosa(dst));
8320 else
8321 m_freem(m0);
8322 }
8323
8324 if (error == 0)
8325 ipstat.ips_fragmented++;
8326
8327 done:
8328 if (r->rt != PF_DUPTO)
8329 *m = NULL;
8330 if (ro == &iproute && ro->ro_rt)
8331 RTFREE(ro->ro_rt);
8332 return;
8333
8334 bad:
8335 m_freem(m0);
8336 goto done;
8337 }
8338 #endif /* INET */
8339
8340 #if INET6
8341 static void
8342 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
8343 struct pf_state *s, struct pf_pdesc *pd)
8344 {
8345 #pragma unused(pd)
8346 struct mbuf *m0;
8347 struct route_in6 ip6route;
8348 struct route_in6 *ro;
8349 struct sockaddr_in6 *dst;
8350 struct ip6_hdr *ip6;
8351 struct ifnet *ifp = NULL;
8352 struct pf_addr naddr;
8353 struct pf_src_node *sn = NULL;
8354 int error = 0;
8355
8356 if (m == NULL || *m == NULL || r == NULL ||
8357 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
8358 panic("pf_route6: invalid parameters");
8359
8360 if (pd->pf_mtag->routed++ > 3) {
8361 m0 = *m;
8362 *m = NULL;
8363 goto bad;
8364 }
8365
8366 if (r->rt == PF_DUPTO) {
8367 if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
8368 return;
8369 } else {
8370 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
8371 return;
8372 m0 = *m;
8373 }
8374
8375 if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
8376 DPFPRINTF(PF_DEBUG_URGENT,
8377 ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
8378 goto bad;
8379 }
8380 ip6 = mtod(m0, struct ip6_hdr *);
8381
8382 ro = &ip6route;
8383 bzero((caddr_t)ro, sizeof (*ro));
8384 dst = (struct sockaddr_in6 *)&ro->ro_dst;
8385 dst->sin6_family = AF_INET6;
8386 dst->sin6_len = sizeof (*dst);
8387 dst->sin6_addr = ip6->ip6_dst;
8388
8389 /* Cheat. XXX why only in the v6 case??? */
8390 if (r->rt == PF_FASTROUTE) {
8391 struct pf_mtag *pf_mtag;
8392
8393 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
8394 goto bad;
8395 pf_mtag->flags |= PF_TAG_GENERATED;
8396 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
8397 return;
8398 }
8399
8400 if (TAILQ_EMPTY(&r->rpool.list)) {
8401 DPFPRINTF(PF_DEBUG_URGENT,
8402 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
8403 goto bad;
8404 }
8405 if (s == NULL) {
8406 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
8407 &naddr, NULL, &sn);
8408 if (!PF_AZERO(&naddr, AF_INET6))
8409 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
8410 &naddr, AF_INET6);
8411 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
8412 } else {
8413 if (!PF_AZERO(&s->rt_addr, AF_INET6))
8414 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
8415 &s->rt_addr, AF_INET6);
8416 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
8417 }
8418 if (ifp == NULL)
8419 goto bad;
8420
8421 if (oifp != ifp) {
8422 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
8423 goto bad;
8424 else if (m0 == NULL)
8425 goto done;
8426 if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
8427 DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
8428 "< sizeof (struct ip6_hdr)\n"));
8429 goto bad;
8430 }
8431 ip6 = mtod(m0, struct ip6_hdr *);
8432 }
8433
8434 /*
8435 * If the packet is too large for the outgoing interface,
8436 * send back an icmp6 error.
8437 */
8438 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
8439 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
8440 if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
8441 error = nd6_output(ifp, ifp, m0, dst, NULL);
8442 } else {
8443 in6_ifstat_inc(ifp, ifs6_in_toobig);
8444 if (r->rt != PF_DUPTO)
8445 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
8446 else
8447 goto bad;
8448 }
8449
8450 done:
8451 if (r->rt != PF_DUPTO)
8452 *m = NULL;
8453 return;
8454
8455 bad:
8456 m_freem(m0);
8457 goto done;
8458 }
8459 #endif /* INET6 */
8460
8461
8462 /*
8463 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
8464 * off is the offset where the protocol header starts
8465 * len is the total length of protocol header plus payload
8466 * returns 0 when the checksum is valid, otherwise returns 1.
8467 */
8468 static int
8469 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
8470 sa_family_t af)
8471 {
8472 u_int16_t sum;
8473
8474 switch (p) {
8475 case IPPROTO_TCP:
8476 case IPPROTO_UDP:
8477 /*
8478 * Optimize for the common case; if the hardware calculated
8479 * value doesn't include pseudo-header checksum, or if it
8480 * is partially-computed (only 16-bit summation), do it in
8481 * software below.
8482 */
8483 if (apple_hwcksum_rx && (m->m_pkthdr.csum_flags &
8484 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
8485 (m->m_pkthdr.csum_data ^ 0xffff) == 0) {
8486 return (0);
8487 }
8488 break;
8489 case IPPROTO_ICMP:
8490 #if INET6
8491 case IPPROTO_ICMPV6:
8492 #endif /* INET6 */
8493 break;
8494 default:
8495 return (1);
8496 }
8497 if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))
8498 return (1);
8499 if (m->m_pkthdr.len < off + len)
8500 return (1);
8501 switch (af) {
8502 #if INET
8503 case AF_INET:
8504 if (p == IPPROTO_ICMP) {
8505 if (m->m_len < off)
8506 return (1);
8507 m->m_data += off;
8508 m->m_len -= off;
8509 sum = in_cksum(m, len);
8510 m->m_data -= off;
8511 m->m_len += off;
8512 } else {
8513 if (m->m_len < (int)sizeof (struct ip))
8514 return (1);
8515 sum = inet_cksum(m, p, off, len);
8516 }
8517 break;
8518 #endif /* INET */
8519 #if INET6
8520 case AF_INET6:
8521 if (m->m_len < (int)sizeof (struct ip6_hdr))
8522 return (1);
8523 sum = inet6_cksum(m, p, off, len);
8524 break;
8525 #endif /* INET6 */
8526 default:
8527 return (1);
8528 }
8529 if (sum) {
8530 switch (p) {
8531 case IPPROTO_TCP:
8532 tcpstat.tcps_rcvbadsum++;
8533 break;
8534 case IPPROTO_UDP:
8535 udpstat.udps_badsum++;
8536 break;
8537 case IPPROTO_ICMP:
8538 icmpstat.icps_checksum++;
8539 break;
8540 #if INET6
8541 case IPPROTO_ICMPV6:
8542 icmp6stat.icp6s_checksum++;
8543 break;
8544 #endif /* INET6 */
8545 }
8546 return (1);
8547 }
8548 return (0);
8549 }
8550
8551 #if INET
8552 #ifndef NO_APPLE_EXTENSIONS
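/*
 * The normalization and state-tracking routines may swap in a new working
 * mbuf (pd.mp); this refreshes the local mbuf and IP header pointers
 * whenever that has happened.
 */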
8553 #define PF_APPLE_UPDATE_PDESC_IPv4() \
8554 do { \
8555 if (m && pd.mp && m != pd.mp) { \
8556 m = pd.mp; \
8557 h = mtod(m, struct ip *); \
8558 } \
8559 } while (0)
8560 #endif
8561
8562 int
8563 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
8564 struct ether_header *eh)
8565 {
8566 struct pfi_kif *kif;
8567 u_short action, reason = 0, log = 0;
8568 struct mbuf *m = *m0;
8569 struct ip *h = 0;
8570 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
8571 struct pf_state *s = NULL;
8572 struct pf_state_key *sk = NULL;
8573 struct pf_ruleset *ruleset = NULL;
8574 struct pf_pdesc pd;
8575 int off, dirndx, pqid = 0;
8576
8577 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
8578
8579 if (!pf_status.running)
8580 return (PF_PASS);
8581
8582 memset(&pd, 0, sizeof (pd));
8583
8584 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
8585 DPFPRINTF(PF_DEBUG_URGENT,
8586 ("pf_test: pf_get_mtag returned NULL\n"));
8587 return (PF_DROP);
8588 }
8589
8590 if (pd.pf_mtag->flags & PF_TAG_GENERATED)
8591 return (PF_PASS);
8592
8593 kif = (struct pfi_kif *)ifp->if_pf_kif;
8594
8595 if (kif == NULL) {
8596 DPFPRINTF(PF_DEBUG_URGENT,
8597 ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
8598 return (PF_DROP);
8599 }
8600 if (kif->pfik_flags & PFI_IFLAG_SKIP)
8601 return (PF_PASS);
8602
8603 #ifdef DIAGNOSTIC
8604 if ((m->m_flags & M_PKTHDR) == 0)
8605 panic("non-M_PKTHDR is passed to pf_test");
8606 #endif /* DIAGNOSTIC */
8607
8608 if (m->m_pkthdr.len < (int)sizeof (*h)) {
8609 action = PF_DROP;
8610 REASON_SET(&reason, PFRES_SHORT);
8611 log = 1;
8612 goto done;
8613 }
8614
8615 /* We do IP header normalization and packet reassembly here */
8616 if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
8617 action = PF_DROP;
8618 goto done;
8619 }
8620 m = *m0; /* pf_normalize messes with m0 */
8621 h = mtod(m, struct ip *);
8622
8623 off = h->ip_hl << 2;
8624 if (off < (int)sizeof (*h)) {
8625 action = PF_DROP;
8626 REASON_SET(&reason, PFRES_SHORT);
8627 log = 1;
8628 goto done;
8629 }
8630
8631 pd.src = (struct pf_addr *)&h->ip_src;
8632 pd.dst = (struct pf_addr *)&h->ip_dst;
8633 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
8634 pd.ip_sum = &h->ip_sum;
8635 pd.proto = h->ip_p;
8636 #ifndef NO_APPLE_EXTENSIONS
8637 pd.proto_variant = 0;
8638 pd.mp = m;
8639 pd.lmw = 0;
8640 #endif
8641 pd.af = AF_INET;
8642 pd.tos = h->ip_tos;
8643 pd.tot_len = ntohs(h->ip_len);
8644 pd.eh = eh;
8645
8646 /* handle fragments that didn't get reassembled by normalization */
8647 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
8648 action = pf_test_fragment(&r, dir, kif, m, h,
8649 &pd, &a, &ruleset);
8650 goto done;
8651 }
8652
8653 switch (h->ip_p) {
8654
8655 case IPPROTO_TCP: {
8656 struct tcphdr th;
8657 pd.hdr.tcp = &th;
8658 if (!pf_pull_hdr(m, off, &th, sizeof (th),
8659 &action, &reason, AF_INET)) {
8660 log = action != PF_PASS;
8661 goto done;
8662 }
8663 pd.p_len = pd.tot_len - off - (th.th_off << 2);
8664 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
8665 pqid = 1;
8666 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
8667 #ifndef NO_APPLE_EXTENSIONS
8668 if (pd.lmw < 0)
8669 goto done;
8670 PF_APPLE_UPDATE_PDESC_IPv4();
8671 #endif
8672 if (action == PF_DROP)
8673 goto done;
8674 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
8675 &reason);
8676 #ifndef NO_APPLE_EXTENSIONS
8677 if (pd.lmw < 0)
8678 goto done;
8679 PF_APPLE_UPDATE_PDESC_IPv4();
8680 #endif
8681 if (action == PF_PASS) {
8682 #if NPFSYNC
8683 pfsync_update_state(s);
8684 #endif /* NPFSYNC */
8685 r = s->rule.ptr;
8686 a = s->anchor.ptr;
8687 log = s->log;
8688 } else if (s == NULL)
8689 action = pf_test_rule(&r, &s, dir, kif,
8690 m, off, h, &pd, &a, &ruleset, &ipintrq);
8691 break;
8692 }
8693
8694 case IPPROTO_UDP: {
8695 struct udphdr uh;
8696
8697 pd.hdr.udp = &uh;
8698 if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
8699 &action, &reason, AF_INET)) {
8700 log = action != PF_PASS;
8701 goto done;
8702 }
8703 if (uh.uh_dport == 0 ||
8704 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
8705 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
8706 action = PF_DROP;
8707 REASON_SET(&reason, PFRES_SHORT);
8708 goto done;
8709 }
8710 #ifndef NO_APPLE_EXTENSIONS
8711 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
8712 &reason);
8713 if (pd.lmw < 0)
8714 goto done;
8715 PF_APPLE_UPDATE_PDESC_IPv4();
8716 #else
8717 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
8718 #endif
8719 if (action == PF_PASS) {
8720 #if NPFSYNC
8721 pfsync_update_state(s);
8722 #endif /* NPFSYNC */
8723 r = s->rule.ptr;
8724 a = s->anchor.ptr;
8725 log = s->log;
8726 } else if (s == NULL)
8727 action = pf_test_rule(&r, &s, dir, kif,
8728 m, off, h, &pd, &a, &ruleset, &ipintrq);
8729 break;
8730 }
8731
8732 case IPPROTO_ICMP: {
8733 struct icmp ih;
8734
8735 pd.hdr.icmp = &ih;
8736 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
8737 &action, &reason, AF_INET)) {
8738 log = action != PF_PASS;
8739 goto done;
8740 }
8741 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
8742 &reason);
8743 #ifndef NO_APPLE_EXTENSIONS
8744 if (pd.lmw < 0)
8745 goto done;
8746 PF_APPLE_UPDATE_PDESC_IPv4();
8747 #endif
8748 if (action == PF_PASS) {
8749 #if NPFSYNC
8750 pfsync_update_state(s);
8751 #endif /* NPFSYNC */
8752 r = s->rule.ptr;
8753 a = s->anchor.ptr;
8754 log = s->log;
8755 } else if (s == NULL)
8756 action = pf_test_rule(&r, &s, dir, kif,
8757 m, off, h, &pd, &a, &ruleset, &ipintrq);
8758 break;
8759 }
8760
8761 #ifndef NO_APPLE_EXTENSIONS
8762 case IPPROTO_ESP: {
8763 struct pf_esp_hdr esp;
8764
8765 pd.hdr.esp = &esp;
8766 if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
8767 AF_INET)) {
8768 log = action != PF_PASS;
8769 goto done;
8770 }
8771 action = pf_test_state_esp(&s, dir, kif, off, &pd);
8772 if (pd.lmw < 0)
8773 goto done;
8774 PF_APPLE_UPDATE_PDESC_IPv4();
8775 if (action == PF_PASS) {
8776 #if NPFSYNC
8777 pfsync_update_state(s);
8778 #endif /* NPFSYNC */
8779 r = s->rule.ptr;
8780 a = s->anchor.ptr;
8781 log = s->log;
8782 } else if (s == NULL)
8783 action = pf_test_rule(&r, &s, dir, kif,
8784 m, off, h, &pd, &a, &ruleset, &ipintrq);
8785 break;
8786 }
8787
8788 case IPPROTO_GRE: {
8789 struct pf_grev1_hdr grev1;
8790 pd.hdr.grev1 = &grev1;
8791 if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
8792 &reason, AF_INET)) {
8793 log = (action != PF_PASS);
8794 goto done;
8795 }
8796 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
8797 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
8798 if (ntohs(grev1.payload_length) >
8799 m->m_pkthdr.len - off) {
8800 action = PF_DROP;
8801 REASON_SET(&reason, PFRES_SHORT);
8802 goto done;
8803 }
8804 pd.proto_variant = PF_GRE_PPTP_VARIANT;
8805 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
8806 if (pd.lmw < 0) goto done;
8807 PF_APPLE_UPDATE_PDESC_IPv4();
8808 if (action == PF_PASS) {
8809 #if NPFSYNC
8810 pfsync_update_state(s);
8811 #endif /* NPFSYNC */
8812 r = s->rule.ptr;
8813 a = s->anchor.ptr;
8814 log = s->log;
8815 break;
8816 } else if (s == NULL) {
8817 action = pf_test_rule(&r, &s, dir, kif, m, off,
8818 h, &pd, &a, &ruleset, &ipintrq);
8819 if (action == PF_PASS)
8820 break;
8821 }
8822 }
8823
8824 /* not GREv1/PPTP, so treat as ordinary GRE... */
8825 }
8826 #endif
8827
8828 default:
8829 action = pf_test_state_other(&s, dir, kif, &pd);
8830 #ifndef NO_APPLE_EXTENSIONS
8831 if (pd.lmw < 0)
8832 goto done;
8833 PF_APPLE_UPDATE_PDESC_IPv4();
8834 #endif
8835 if (action == PF_PASS) {
8836 #if NPFSYNC
8837 pfsync_update_state(s);
8838 #endif /* NPFSYNC */
8839 r = s->rule.ptr;
8840 a = s->anchor.ptr;
8841 log = s->log;
8842 } else if (s == NULL)
8843 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
8844 &pd, &a, &ruleset, &ipintrq);
8845 break;
8846 }
8847
8848 done:
8849 #ifndef NO_APPLE_EXTENSIONS
8850 *m0 = pd.mp;
8851 PF_APPLE_UPDATE_PDESC_IPv4();
8852 #endif
8853
8854 if (action == PF_PASS && h->ip_hl > 5 &&
8855 !((s && s->allow_opts) || r->allow_opts)) {
8856 action = PF_DROP;
8857 REASON_SET(&reason, PFRES_IPOPTIONS);
8858 log = 1;
8859 DPFPRINTF(PF_DEBUG_MISC,
8860 ("pf: dropping packet with ip options [hlen=%u]\n",
8861 (unsigned int) h->ip_hl));
8862 }
8863
8864 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
8865 (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
8866 r->rtableid);
8867
8868 #if ALTQ
8869 if (action == PF_PASS && r->qid) {
8870 if (pqid || (pd.tos & IPTOS_LOWDELAY))
8871 pd.pf_mtag->qid = r->pqid;
8872 else
8873 pd.pf_mtag->qid = r->qid;
8874 /* add hints for ecn */
8875 pd.pf_mtag->hdr = h;
8876 }
8877 #endif /* ALTQ */
8878
8879 /*
8880 * connections redirected to loopback should not match sockets
8881 * bound specifically to loopback due to security implications,
8882 * see tcp_input() and in_pcblookup_listen().
8883 */
8884 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
8885 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
8886 (s->nat_rule.ptr->action == PF_RDR ||
8887 s->nat_rule.ptr->action == PF_BINAT) &&
8888 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
8889 pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;
8890
8891 if (log) {
8892 struct pf_rule *lr;
8893
8894 if (s != NULL && s->nat_rule.ptr != NULL &&
8895 s->nat_rule.ptr->log & PF_LOG_ALL)
8896 lr = s->nat_rule.ptr;
8897 else
8898 lr = r;
8899 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
8900 &pd);
8901 }
8902
8903 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
8904 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
8905
8906 if (action == PF_PASS || r->action == PF_DROP) {
8907 dirndx = (dir == PF_OUT);
8908 r->packets[dirndx]++;
8909 r->bytes[dirndx] += pd.tot_len;
8910 if (a != NULL) {
8911 a->packets[dirndx]++;
8912 a->bytes[dirndx] += pd.tot_len;
8913 }
8914 if (s != NULL) {
8915 sk = s->state_key;
8916 if (s->nat_rule.ptr != NULL) {
8917 s->nat_rule.ptr->packets[dirndx]++;
8918 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
8919 }
8920 if (s->src_node != NULL) {
8921 s->src_node->packets[dirndx]++;
8922 s->src_node->bytes[dirndx] += pd.tot_len;
8923 }
8924 if (s->nat_src_node != NULL) {
8925 s->nat_src_node->packets[dirndx]++;
8926 s->nat_src_node->bytes[dirndx] += pd.tot_len;
8927 }
8928 dirndx = (dir == sk->direction) ? 0 : 1;
8929 s->packets[dirndx]++;
8930 s->bytes[dirndx] += pd.tot_len;
8931 }
8932 tr = r;
8933 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
8934 if (nr != NULL) {
8935 struct pf_addr *x;
8936 /*
8937 * XXX: we need to make sure that the addresses
8938 * passed to pfr_update_stats() are the same as
8939 * the addresses used during matching (pfr_match)
8940 */
8941 if (r == &pf_default_rule) {
8942 tr = nr;
8943 x = (sk == NULL || sk->direction == dir) ?
8944 &pd.baddr : &pd.naddr;
8945 } else
8946 x = (sk == NULL || sk->direction == dir) ?
8947 &pd.naddr : &pd.baddr;
8948 if (x == &pd.baddr || s == NULL) {
8949 /* we need to change the address */
8950 if (dir == PF_OUT)
8951 pd.src = x;
8952 else
8953 pd.dst = x;
8954 }
8955 }
8956 if (tr->src.addr.type == PF_ADDR_TABLE)
8957 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
8958 sk->direction == dir) ?
8959 pd.src : pd.dst, pd.af,
8960 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
8961 tr->src.neg);
8962 if (tr->dst.addr.type == PF_ADDR_TABLE)
8963 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
8964 sk->direction == dir) ? pd.dst : pd.src, pd.af,
8965 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
8966 tr->dst.neg);
8967 }
8968
8969 #ifndef NO_APPLE_EXTENSIONS
8970 VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
8971
8972 if (*m0) {
8973 if (pd.lmw < 0) {
8974 REASON_SET(&reason, PFRES_MEMORY);
8975 action = PF_DROP;
8976 }
8977
8978 if (action == PF_DROP) {
8979 m_freem(*m0);
8980 *m0 = NULL;
8981 return (PF_DROP);
8982 }
8983
8984 *m0 = m;
8985 }
8986 #endif
8987
8988 if (action == PF_SYNPROXY_DROP) {
8989 m_freem(*m0);
8990 *m0 = NULL;
8991 action = PF_PASS;
8992 } else if (r->rt)
8993 /* pf_route can free the mbuf causing *m0 to become NULL */
8994 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
8995
8996 return (action);
8997 }
8998 #endif /* INET */
8999
9000 #if INET6
9001 #ifndef NO_APPLE_EXTENSIONS
9002 #define PF_APPLE_UPDATE_PDESC_IPv6() \
9003 do { \
9004 if (m && pd.mp && m != pd.mp) { \
9005 if (n == m) \
9006 n = pd.mp; \
9007 m = pd.mp; \
9008 h = mtod(m, struct ip6_hdr *); \
9009 } \
9010 } while (0)
9011 #endif
9012
9013 int
9014 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
9015 struct ether_header *eh)
9016 {
9017 struct pfi_kif *kif;
9018 u_short action, reason = 0, log = 0;
9019 struct mbuf *m = *m0, *n = NULL;
9020 struct ip6_hdr *h;
9021 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
9022 struct pf_state *s = NULL;
9023 struct pf_state_key *sk = NULL;
9024 struct pf_ruleset *ruleset = NULL;
9025 struct pf_pdesc pd;
9026 int off, terminal = 0, dirndx, rh_cnt = 0;
9027
9028 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9029
9030 if (!pf_status.running)
9031 return (PF_PASS);
9032
9033 memset(&pd, 0, sizeof (pd));
9034
9035 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
9036 DPFPRINTF(PF_DEBUG_URGENT,
9037 ("pf_test6: pf_get_mtag returned NULL\n"));
9038 return (PF_DROP);
9039 }
9040
9041 if (pd.pf_mtag->flags & PF_TAG_GENERATED)
9042 return (PF_PASS);
9043
9044 kif = (struct pfi_kif *)ifp->if_pf_kif;
9045
9046 if (kif == NULL) {
9047 DPFPRINTF(PF_DEBUG_URGENT,
9048 ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));
9049 return (PF_DROP);
9050 }
9051 if (kif->pfik_flags & PFI_IFLAG_SKIP)
9052 return (PF_PASS);
9053
9054 #ifdef DIAGNOSTIC
9055 if ((m->m_flags & M_PKTHDR) == 0)
9056 panic("non-M_PKTHDR is passed to pf_test6");
9057 #endif /* DIAGNOSTIC */
9058
9059 h = mtod(m, struct ip6_hdr *);
9060
9061 if (m->m_pkthdr.len < (int)sizeof (*h)) {
9062 action = PF_DROP;
9063 REASON_SET(&reason, PFRES_SHORT);
9064 log = 1;
9065 goto done;
9066 }
9067
9068 /* We do IP header normalization and packet reassembly here */
9069 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
9070 action = PF_DROP;
9071 goto done;
9072 }
9073 m = *m0; /* pf_normalize messes with m0 */
9074 h = mtod(m, struct ip6_hdr *);
9075
9076 #if 1
9077 /*
9078 * we do not support jumbograms yet.  if we keep going, a zero ip6_plen
9079 * will be misinterpreted as an empty payload, so drop the packet for now.
9080 */
9081 if (htons(h->ip6_plen) == 0) {
9082 action = PF_DROP;
9083 REASON_SET(&reason, PFRES_NORM); /*XXX*/
9084 goto done;
9085 }
9086 #endif
9087
9088 pd.src = (struct pf_addr *)&h->ip6_src;
9089 pd.dst = (struct pf_addr *)&h->ip6_dst;
9090 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
9091 pd.ip_sum = NULL;
9092 pd.af = AF_INET6;
9093 pd.tos = 0;
9094 pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);
9095 pd.eh = eh;
9096
9097 off = ((caddr_t)h - m->m_data) + sizeof (struct ip6_hdr);
9098 pd.proto = h->ip6_nxt;
9099 #ifndef NO_APPLE_EXTENSIONS
9100 pd.proto_variant = 0;
9101 pd.mp = m;
9102 pd.lmw = 0;
9103 #endif
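/*
 * Walk the IPv6 extension header chain: fragments go to the fragment
 * ruleset, a second routing header or a type 0 routing header drops the
 * packet, and AH/hop-by-hop/destination options are skipped until the
 * terminal protocol is reached.
 */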
9104 do {
9105 switch (pd.proto) {
9106 case IPPROTO_FRAGMENT:
9107 action = pf_test_fragment(&r, dir, kif, m, h,
9108 &pd, &a, &ruleset);
9109 if (action == PF_DROP)
9110 REASON_SET(&reason, PFRES_FRAG);
9111 goto done;
9112 case IPPROTO_ROUTING: {
9113 struct ip6_rthdr rthdr;
9114
9115 if (rh_cnt++) {
9116 DPFPRINTF(PF_DEBUG_MISC,
9117 ("pf: IPv6 more than one rthdr\n"));
9118 action = PF_DROP;
9119 REASON_SET(&reason, PFRES_IPOPTIONS);
9120 log = 1;
9121 goto done;
9122 }
9123 if (!pf_pull_hdr(m, off, &rthdr, sizeof (rthdr), NULL,
9124 &reason, pd.af)) {
9125 DPFPRINTF(PF_DEBUG_MISC,
9126 ("pf: IPv6 short rthdr\n"));
9127 action = PF_DROP;
9128 REASON_SET(&reason, PFRES_SHORT);
9129 log = 1;
9130 goto done;
9131 }
9132 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
9133 DPFPRINTF(PF_DEBUG_MISC,
9134 ("pf: IPv6 rthdr0\n"));
9135 action = PF_DROP;
9136 REASON_SET(&reason, PFRES_IPOPTIONS);
9137 log = 1;
9138 goto done;
9139 }
9140 /* FALLTHROUGH */
9141 }
9142 case IPPROTO_AH:
9143 case IPPROTO_HOPOPTS:
9144 case IPPROTO_DSTOPTS: {
9145 /* get next header and header length */
9146 struct ip6_ext opt6;
9147
9148 if (!pf_pull_hdr(m, off, &opt6, sizeof (opt6),
9149 NULL, &reason, pd.af)) {
9150 DPFPRINTF(PF_DEBUG_MISC,
9151 ("pf: IPv6 short opt\n"));
9152 action = PF_DROP;
9153 log = 1;
9154 goto done;
9155 }
9156 if (pd.proto == IPPROTO_AH)
9157 off += (opt6.ip6e_len + 2) * 4;
9158 else
9159 off += (opt6.ip6e_len + 1) * 8;
9160 pd.proto = opt6.ip6e_nxt;
9161 /* advance to the next header */
9162 break;
9163 }
9164 default:
9165 terminal++;
9166 break;
9167 }
9168 } while (!terminal);
9169
9170 /* if there's no routing header, use unmodified mbuf for checksumming */
9171 if (!n)
9172 n = m;
9173
9174 switch (pd.proto) {
9175
9176 case IPPROTO_TCP: {
9177 struct tcphdr th;
9178
9179 pd.hdr.tcp = &th;
9180 if (!pf_pull_hdr(m, off, &th, sizeof (th),
9181 &action, &reason, AF_INET6)) {
9182 log = action != PF_PASS;
9183 goto done;
9184 }
9185 pd.p_len = pd.tot_len - off - (th.th_off << 2);
9186 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
9187 #ifndef NO_APPLE_EXTENSIONS
9188 if (pd.lmw < 0)
9189 goto done;
9190 PF_APPLE_UPDATE_PDESC_IPv6();
9191 #endif
9192 if (action == PF_DROP)
9193 goto done;
9194 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
9195 &reason);
9196 #ifndef NO_APPLE_EXTENSIONS
9197 if (pd.lmw < 0)
9198 goto done;
9199 PF_APPLE_UPDATE_PDESC_IPv6();
9200 #endif
9201 if (action == PF_PASS) {
9202 #if NPFSYNC
9203 pfsync_update_state(s);
9204 #endif /* NPFSYNC */
9205 r = s->rule.ptr;
9206 a = s->anchor.ptr;
9207 log = s->log;
9208 } else if (s == NULL)
9209 action = pf_test_rule(&r, &s, dir, kif,
9210 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9211 break;
9212 }
9213
9214 case IPPROTO_UDP: {
9215 struct udphdr uh;
9216
9217 pd.hdr.udp = &uh;
9218 if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
9219 &action, &reason, AF_INET6)) {
9220 log = action != PF_PASS;
9221 goto done;
9222 }
9223 if (uh.uh_dport == 0 ||
9224 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
9225 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
9226 action = PF_DROP;
9227 REASON_SET(&reason, PFRES_SHORT);
9228 goto done;
9229 }
9230 #ifndef NO_APPLE_EXTENSIONS
9231 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
9232 &reason);
9233 if (pd.lmw < 0)
9234 goto done;
9235 PF_APPLE_UPDATE_PDESC_IPv6();
9236 #else
9237 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
9238 #endif
9239 if (action == PF_PASS) {
9240 #if NPFSYNC
9241 pfsync_update_state(s);
9242 #endif /* NPFSYNC */
9243 r = s->rule.ptr;
9244 a = s->anchor.ptr;
9245 log = s->log;
9246 } else if (s == NULL)
9247 action = pf_test_rule(&r, &s, dir, kif,
9248 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9249 break;
9250 }
9251
9252 case IPPROTO_ICMPV6: {
9253 struct icmp6_hdr ih;
9254
9255 pd.hdr.icmp6 = &ih;
9256 if (!pf_pull_hdr(m, off, &ih, sizeof (ih),
9257 &action, &reason, AF_INET6)) {
9258 log = action != PF_PASS;
9259 goto done;
9260 }
9261 action = pf_test_state_icmp(&s, dir, kif,
9262 m, off, h, &pd, &reason);
9263 #ifndef NO_APPLE_EXTENSIONS
9264 if (pd.lmw < 0)
9265 goto done;
9266 PF_APPLE_UPDATE_PDESC_IPv6();
9267 #endif
9268 if (action == PF_PASS) {
9269 #if NPFSYNC
9270 pfsync_update_state(s);
9271 #endif /* NPFSYNC */
9272 r = s->rule.ptr;
9273 a = s->anchor.ptr;
9274 log = s->log;
9275 } else if (s == NULL)
9276 action = pf_test_rule(&r, &s, dir, kif,
9277 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9278 break;
9279 }
9280
9281 #ifndef NO_APPLE_EXTENSIONS
9282 case IPPROTO_ESP: {
9283 struct pf_esp_hdr esp;
9284
9285 pd.hdr.esp = &esp;
9286 if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
9287 AF_INET6)) {
9288 log = action != PF_PASS;
9289 goto done;
9290 }
9291 action = pf_test_state_esp(&s, dir, kif, off, &pd);
9292 if (pd.lmw < 0)
9293 goto done;
9294 PF_APPLE_UPDATE_PDESC_IPv6();
9295 if (action == PF_PASS) {
9296 #if NPFSYNC
9297 pfsync_update_state(s);
9298 #endif /* NPFSYNC */
9299 r = s->rule.ptr;
9300 a = s->anchor.ptr;
9301 log = s->log;
9302 } else if (s == NULL)
9303 action = pf_test_rule(&r, &s, dir, kif,
9304 m, off, h, &pd, &a, &ruleset, &ip6intrq);
9305 break;
9306 }
9307
9308 case IPPROTO_GRE: {
9309 struct pf_grev1_hdr grev1;
9310
9311 pd.hdr.grev1 = &grev1;
9312 if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
9313 &reason, AF_INET6)) {
9314 log = (action != PF_PASS);
9315 goto done;
9316 }
9317 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
9318 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
9319 if (ntohs(grev1.payload_length) >
9320 m->m_pkthdr.len - off) {
9321 action = PF_DROP;
9322 REASON_SET(&reason, PFRES_SHORT);
9323 goto done;
9324 }
9325 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
9326 if (pd.lmw < 0)
9327 goto done;
9328 PF_APPLE_UPDATE_PDESC_IPv6();
9329 if (action == PF_PASS) {
9330 #if NPFSYNC
9331 pfsync_update_state(s);
9332 #endif /* NPFSYNC */
9333 r = s->rule.ptr;
9334 a = s->anchor.ptr;
9335 log = s->log;
9336 break;
9337 } else if (s == NULL) {
9338 action = pf_test_rule(&r, &s, dir, kif, m, off,
9339 h, &pd, &a, &ruleset, &ip6intrq);
9340 if (action == PF_PASS)
9341 break;
9342 }
9343 }
9344
9345 /* not GREv1/PPTP, so treat as ordinary GRE... */
9346 }
9347 #endif
9348
9349 default:
9350 action = pf_test_state_other(&s, dir, kif, &pd);
9351 #ifndef NO_APPLE_EXTENSIONS
9352 if (pd.lmw < 0)
9353 goto done;
9354 PF_APPLE_UPDATE_PDESC_IPv6();
9355 #endif
9356 if (action == PF_PASS) {
9357 #if NPFSYNC
9358 pfsync_update_state(s);
9359 #endif /* NPFSYNC */
9360 r = s->rule.ptr;
9361 a = s->anchor.ptr;
9362 log = s->log;
9363 } else if (s == NULL)
9364 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
9365 &pd, &a, &ruleset, &ip6intrq);
9366 break;
9367 }
9368
9369 done:
9370 #ifndef NO_APPLE_EXTENSIONS
9371 *m0 = pd.mp;
9372 PF_APPLE_UPDATE_PDESC_IPv6();
9373 #endif
9374
9375 if (n != m) {
9376 m_freem(n);
9377 n = NULL;
9378 }
9379
9380 /* handle dangerous IPv6 extension headers. */
9381 if (action == PF_PASS && rh_cnt &&
9382 !((s && s->allow_opts) || r->allow_opts)) {
9383 action = PF_DROP;
9384 REASON_SET(&reason, PFRES_IPOPTIONS);
9385 log = 1;
9386 DPFPRINTF(PF_DEBUG_MISC,
9387 ("pf: dropping packet with dangerous v6 headers\n"));
9388 }
9389
9390 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
9391 (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
9392 r->rtableid);
9393
9394 #if ALTQ
9395 if (action == PF_PASS && r->qid) {
9396 if (pd.tos & IPTOS_LOWDELAY)
9397 pd.pf_mtag->qid = r->pqid;
9398 else
9399 pd.pf_mtag->qid = r->qid;
9400 /* add hints for ecn */
9401 pd.pf_mtag->hdr = h;
9402 }
9403 #endif /* ALTQ */
9404
9405 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
9406 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
9407 (s->nat_rule.ptr->action == PF_RDR ||
9408 s->nat_rule.ptr->action == PF_BINAT) &&
9409 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
9410 pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;
9411
9412 if (log) {
9413 struct pf_rule *lr;
9414
9415 if (s != NULL && s->nat_rule.ptr != NULL &&
9416 s->nat_rule.ptr->log & PF_LOG_ALL)
9417 lr = s->nat_rule.ptr;
9418 else
9419 lr = r;
9420 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
9421 &pd);
9422 }
9423
9424 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
9425 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
9426
9427 if (action == PF_PASS || r->action == PF_DROP) {
9428 dirndx = (dir == PF_OUT);
9429 r->packets[dirndx]++;
9430 r->bytes[dirndx] += pd.tot_len;
9431 if (a != NULL) {
9432 a->packets[dirndx]++;
9433 a->bytes[dirndx] += pd.tot_len;
9434 }
9435 if (s != NULL) {
9436 sk = s->state_key;
9437 if (s->nat_rule.ptr != NULL) {
9438 s->nat_rule.ptr->packets[dirndx]++;
9439 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
9440 }
9441 if (s->src_node != NULL) {
9442 s->src_node->packets[dirndx]++;
9443 s->src_node->bytes[dirndx] += pd.tot_len;
9444 }
9445 if (s->nat_src_node != NULL) {
9446 s->nat_src_node->packets[dirndx]++;
9447 s->nat_src_node->bytes[dirndx] += pd.tot_len;
9448 }
9449 dirndx = (dir == sk->direction) ? 0 : 1;
9450 s->packets[dirndx]++;
9451 s->bytes[dirndx] += pd.tot_len;
9452 }
9453 tr = r;
9454 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
9455 if (nr != NULL) {
9456 struct pf_addr *x;
9457 /*
9458 * XXX: we need to make sure that the addresses
9459 * passed to pfr_update_stats() are the same as
9460 * the addresses used during matching (pfr_match)
9461 */
9462 if (r == &pf_default_rule) {
9463 tr = nr;
9464 x = (s == NULL || sk->direction == dir) ?
9465 &pd.baddr : &pd.naddr;
9466 } else {
9467 x = (s == NULL || sk->direction == dir) ?
9468 &pd.naddr : &pd.baddr;
9469 }
9470 if (x == &pd.baddr || s == NULL) {
9471 if (dir == PF_OUT)
9472 pd.src = x;
9473 else
9474 pd.dst = x;
9475 }
9476 }
9477 if (tr->src.addr.type == PF_ADDR_TABLE)
9478 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
9479 sk->direction == dir) ? pd.src : pd.dst, pd.af,
9480 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
9481 tr->src.neg);
9482 if (tr->dst.addr.type == PF_ADDR_TABLE)
9483 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
9484 sk->direction == dir) ? pd.dst : pd.src, pd.af,
9485 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
9486 tr->dst.neg);
9487 }
9488
9489 #if 0
9490 if (action == PF_SYNPROXY_DROP) {
9491 m_freem(*m0);
9492 *m0 = NULL;
9493 action = PF_PASS;
9494 } else if (r->rt)
9495 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9496 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9497 #else
9498 #ifndef NO_APPLE_EXTENSIONS
9499 VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
9500
9501 if (*m0) {
9502 if (pd.lmw < 0) {
9503 REASON_SET(&reason, PFRES_MEMORY);
9504 action = PF_DROP;
9505 }
9506
9507 if (action == PF_DROP) {
9508 m_freem(*m0);
9509 *m0 = NULL;
9510 return (PF_DROP);
9511 }
9512
9513 *m0 = m;
9514 }
9515
9516 if (action == PF_SYNPROXY_DROP) {
9517 m_freem(*m0);
9518 *m0 = NULL;
9519 action = PF_PASS;
9520 } else if (r->rt) {
9521 if (action == PF_PASS) {
9522 m = *m0;
9523 h = mtod(m, struct ip6_hdr *);
9524 }
9525
9526 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9527 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9528 }
9529 #else
9530 if (action != PF_SYNPROXY_DROP && r->rt)
9531 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9532 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9533
9534 if (action == PF_PASS) {
9535 m = *m0;
9536 h = mtod(m, struct ip6_hdr *);
9537 }
9538
9539 if (action == PF_SYNPROXY_DROP) {
9540 m_freem(*m0);
9541 *m0 = NULL;
9542 action = PF_PASS;
9543 }
9544 #endif
9545 #endif
9546
9547 return (action);
9548 }
9549 #endif /* INET6 */
9550
9551 static int
9552 pf_check_congestion(struct ifqueue *ifq)
9553 {
9554 #pragma unused(ifq)
9555 return (0);
9556 }
9557
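/*
 * Minimal emulation of the BSD pool(9) allocator on top of xnu zones:
 * each pool is backed by a zone, the high-water mark is only recorded,
 * and the hard limit is enforced in pool_get().
 */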
9558 void
9559 pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
9560 int flags, const char *wchan, void *palloc)
9561 {
9562 #pragma unused(align, ioff, flags, palloc)
9563 bzero(pp, sizeof (*pp));
9564 pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
9565 if (pp->pool_zone != NULL) {
9566 zone_change(pp->pool_zone, Z_EXPAND, TRUE);
9567 zone_change(pp->pool_zone, Z_CALLERACCT, FALSE);
9568 pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
9569 pp->pool_name = wchan;
9570 }
9571 }
9572
9573 /* Zones cannot currently be destroyed */
9574 void
9575 pool_destroy(struct pool *pp)
9576 {
9577 #pragma unused(pp)
9578 }
9579
9580 void
9581 pool_sethiwat(struct pool *pp, int n)
9582 {
9583 pp->pool_hiwat = n; /* Currently unused */
9584 }
9585
9586 void
9587 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
9588 {
9589 #pragma unused(warnmess, ratecap)
9590 pp->pool_limit = n;
9591 }
9592
9593 void *
9594 pool_get(struct pool *pp, int flags)
9595 {
9596 void *buf;
9597
9598 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9599
9600 if (pp->pool_count > pp->pool_limit) {
9601 DPFPRINTF(PF_DEBUG_NOISY,
9602 ("pf: pool %s hard limit reached (%d)\n",
9603 pp->pool_name != NULL ? pp->pool_name : "unknown",
9604 pp->pool_limit));
9605 pp->pool_fails++;
9606 return (NULL);
9607 }
9608
9609 buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
9610 if (buf != NULL) {
9611 pp->pool_count++;
9612 VERIFY(pp->pool_count != 0);
9613 }
9614 return (buf);
9615 }
9616
9617 void
9618 pool_put(struct pool *pp, void *v)
9619 {
9620 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9621
9622 zfree(pp->pool_zone, v);
9623 VERIFY(pp->pool_count != 0);
9624 pp->pool_count--;
9625 }
9626
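/*
 * pf metadata travels with the mbuf either as an m_tag (the default) or,
 * when PF_PKTHDR is defined, embedded directly in the packet header.
 * pf_find_mtag() only looks it up; pf_get_mtag() also creates the m_tag
 * on demand in the former case.
 */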
9627 struct pf_mtag *
9628 pf_find_mtag(struct mbuf *m)
9629 {
9630 #if !PF_PKTHDR
9631 struct m_tag *mtag;
9632
9633 if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
9634 KERNEL_TAG_TYPE_PF, NULL)) == NULL)
9635 return (NULL);
9636
9637 return ((struct pf_mtag *)(mtag + 1));
9638 #else
9639 if (!(m->m_flags & M_PKTHDR))
9640 return (NULL);
9641
9642 return (&m->m_pkthdr.pf_mtag);
9643 #endif /* PF_PKTHDR */
9644 }
9645
9646 struct pf_mtag *
9647 pf_get_mtag(struct mbuf *m)
9648 {
9649 #if !PF_PKTHDR
9650 struct m_tag *mtag;
9651
9652 if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
9653 NULL)) == NULL) {
9654 mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
9655 sizeof (struct pf_mtag), M_NOWAIT, m);
9656 if (mtag == NULL)
9657 return (NULL);
9658 bzero(mtag + 1, sizeof (struct pf_mtag));
9659 m_tag_prepend(m, mtag);
9660 }
9661 return ((struct pf_mtag *)(mtag + 1));
9662 #else
9663 return (pf_find_mtag(m));
9664 #endif /* PF_PKTHDR */
9665 }
9666
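/*
 * pf_time_second() counts seconds of system uptime (microuptime) and is
 * the clock used for state expiry; pf_calendar_time_second() returns
 * wall-clock seconds (microtime).
 */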
9667 uint64_t
9668 pf_time_second(void)
9669 {
9670 struct timeval t;
9671
9672 microuptime(&t);
9673 return (t.tv_sec);
9674 }
9675
9676 uint64_t
9677 pf_calendar_time_second(void)
9678 {
9679 struct timeval t;
9680
9681 microtime(&t);
9682 return (t.tv_sec);
9683 }
9684
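/*
 * Small callback-list helpers: hook_establish() records a function and
 * argument at the head or tail of the given list, hook_runloop() invokes
 * the hooks and, with HOOK_REMOVE/HOOK_FREE, unlinks and frees them;
 * HOOK_ABORT skips the invocation.
 */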
9685 static void *
9686 hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
9687 {
9688 struct hook_desc *hd;
9689
9690 hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
9691 if (hd == NULL)
9692 return (NULL);
9693
9694 hd->hd_fn = fn;
9695 hd->hd_arg = arg;
9696 if (tail)
9697 TAILQ_INSERT_TAIL(head, hd, hd_list);
9698 else
9699 TAILQ_INSERT_HEAD(head, hd, hd_list);
9700
9701 return (hd);
9702 }
9703
9704 static void
9705 hook_runloop(struct hook_desc_head *head, int flags)
9706 {
9707 struct hook_desc *hd;
9708
9709 if (!(flags & HOOK_REMOVE)) {
9710 if (!(flags & HOOK_ABORT))
9711 TAILQ_FOREACH(hd, head, hd_list)
9712 hd->hd_fn(hd->hd_arg);
9713 } else {
9714 while (!!(hd = TAILQ_FIRST(head))) {
9715 TAILQ_REMOVE(head, hd, hd_list);
9716 if (!(flags & HOOK_ABORT))
9717 hd->hd_fn(hd->hd_arg);
9718 if (flags & HOOK_FREE)
9719 _FREE(hd, M_DEVBUF);
9720 }
9721 }
9722 }