/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*      $apfw: git commit b6bf13f8321283cd7ee82b1795e86506084b1b95 $ */
/*      $OpenBSD: pf_ioctl.c,v 1.175 2007/02/26 22:47:43 deraadt Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/proc_internal.h>
#include <sys/malloc.h>
#include <sys/kauth.h>
#include <sys/mcache.h>
#include <sys/queue.h>

#include <mach/vm_param.h>

#include <net/if_types.h>
#include <net/net_api_stats.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/if_ether.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#include <libkern/crypto/md5.h>

#include <machine/machine_routines.h>

#include <miscfs/devfs/devfs.h>

#include <net/pfvar.h>

#include <net/if_pfsync.h>

#include <net/if_pflog.h>

#include <netinet/ip6.h>
#include <netinet/in_pcb.h>

#include <dev/random/randomdev.h>
static void pfdetach(void);
static int pfopen(dev_t, int, int, struct proc *);
static int pfclose(dev_t, int, int, struct proc *);
static int pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
static int pfioctl_ioc_table(u_long, struct pfioc_table_32 *,
    struct pfioc_table_64 *, struct proc *);
static int pfioctl_ioc_tokens(u_long, struct pfioc_tokens_32 *,
    struct pfioc_tokens_64 *, struct proc *);
static int pfioctl_ioc_rule(u_long, int, struct pfioc_rule *, struct proc *);
static int pfioctl_ioc_state_kill(u_long, struct pfioc_state_kill *,
    struct proc *);
static int pfioctl_ioc_state(u_long, struct pfioc_state *, struct proc *);
static int pfioctl_ioc_states(u_long, struct pfioc_states_32 *,
    struct pfioc_states_64 *, struct proc *);
static int pfioctl_ioc_natlook(u_long, struct pfioc_natlook *, struct proc *);
static int pfioctl_ioc_tm(u_long, struct pfioc_tm *, struct proc *);
static int pfioctl_ioc_limit(u_long, struct pfioc_limit *, struct proc *);
static int pfioctl_ioc_pooladdr(u_long, struct pfioc_pooladdr *, struct proc *);
static int pfioctl_ioc_ruleset(u_long, struct pfioc_ruleset *, struct proc *);
static int pfioctl_ioc_trans(u_long, struct pfioc_trans_32 *,
    struct pfioc_trans_64 *, struct proc *);
static int pfioctl_ioc_src_nodes(u_long, struct pfioc_src_nodes_32 *,
    struct pfioc_src_nodes_64 *, struct proc *);
static int pfioctl_ioc_src_node_kill(u_long, struct pfioc_src_node_kill *,
    struct proc *);
static int pfioctl_ioc_iface(u_long, struct pfioc_iface_32 *,
    struct pfioc_iface_64 *, struct proc *);
static struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
    u_int8_t, u_int8_t, u_int8_t);
static void pf_mv_pool(struct pf_palist *, struct pf_palist *);
static void pf_empty_pool(struct pf_palist *);
static int pf_begin_rules(u_int32_t *, int, const char *);
static int pf_rollback_rules(u_int32_t, int, char *);
static int pf_setup_pfsync_matching(struct pf_ruleset *);
static void pf_hash_rule(MD5_CTX *, struct pf_rule *);
static void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *, u_int8_t);
static int pf_commit_rules(u_int32_t, int, char *);
static void pf_rule_copyin(struct pf_rule *, struct pf_rule *, struct proc *,
    int);
static void pf_rule_copyout(struct pf_rule *, struct pf_rule *);
static void pf_state_export(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_state_import(struct pfsync_state *, struct pf_state_key *,
    struct pf_state *);
static void pf_pooladdr_copyin(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_pooladdr_copyout(struct pf_pooladdr *, struct pf_pooladdr *);
static void pf_expire_states_and_src_nodes(struct pf_rule *);
static void pf_delete_rule_from_ruleset(struct pf_ruleset *,
    int, struct pf_rule *);
static void pf_addrwrap_setup(struct pf_addr_wrap *);
static int pf_rule_setup(struct pfioc_rule *, struct pf_rule *,
    struct pf_ruleset *);
static void pf_delete_rule_by_owner(char *, u_int32_t);
static int pf_delete_rule_by_ticket(struct pfioc_rule *, u_int32_t);
static void pf_ruleset_cleanup(struct pf_ruleset *, int);
static void pf_deleterule_anchor_step_out(struct pf_ruleset **,
    int, struct pf_rule **);

#define PF_CDEV_MAJOR   (-1)

static const struct cdevsw pf_cdevsw = {
        .d_write      = eno_rdwrt,
        .d_reset      = eno_reset,
        .d_select     = eno_select,
        .d_strategy   = eno_strat,
        .d_reserved_1 = eno_getc,
        .d_reserved_2 = eno_putc,
};

static void pf_attach_hooks(void);
/* currently unused along with pfdetach() */
static void pf_detach_hooks(void);

/*
 * This is set during DIOCSTART/DIOCSTOP with pf_perim_lock held as writer,
 * and used in pf_af_hook() for performance optimization, such that packets
 * will enter pf_test() or pf_test6() only when PF is running.
 */
int pf_is_enabled = 0;
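
/*
 * Illustrative sketch only (not part of the original file): the fast-path
 * gate that the comment above describes.  A caller such as pf_af_hook()
 * can test pf_is_enabled without taking pf_lock and skip pf_test() /
 * pf_test6() entirely while PF is stopped.  The helper name below is
 * hypothetical.
 */
#if 0
static inline int
pf_would_run(void)
{
        /* cheap unlocked read; flipped under pf_perim_lock by DIOCSTART/DIOCSTOP */
        return pf_is_enabled != 0;
}
#endif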
u_int32_t pf_hash_seed;
int16_t pf_nat64_configured = 0;

/*
 * These are the pf enabled reference counting variables
 */
#define NR_TOKENS_LIMIT (INT_MAX / sizeof(struct pfioc_token))

static u_int64_t pf_enabled_ref_count;
static u_int32_t nr_tokens = 0;
static u_int32_t pffwrules;
static u_int32_t pfdevcnt;

SLIST_HEAD(list_head, pfioc_kernel_token);
static struct list_head token_list_head;
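
/*
 * Hedged sketch (illustration only): how a userland client of /dev/pf would
 * exercise this reference counting through DIOCSTARTREF/DIOCSTOPREF, which
 * are handled in pfioctl() below.  Error handling is abbreviated and the
 * function name is hypothetical.
 */
#if 0
static int
pf_take_and_drop_reference(int pf_fd)
{
        u_int64_t token = 0;
        struct pfioc_remove_token pfrt;

        /* take a reference; pf is started if it was not already running */
        if (ioctl(pf_fd, DIOCSTARTREF, &token) == -1) {
                return -1;
        }
        bzero(&pfrt, sizeof(pfrt));
        pfrt.token_value = token;
        /* drop the reference; pf stops once the refcount reaches zero */
        if (ioctl(pf_fd, DIOCSTOPREF, &pfrt) == -1) {
                return -1;
        }
        return 0;
}
#endif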
struct pf_rule           pf_default_rule;

typedef struct {
        char tag_name[PF_TAG_NAME_SIZE];
        u_int16_t tag_id;
} pf_reserved_tag_table_t;

#define NUM_RESERVED_TAGS    2
static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = {
        { PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE },
        { PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP },
};

#define RESERVED_TAG_ID_MIN    PF_TAG_ID_SYSTEM_SERVICE

#define DYNAMIC_TAG_ID_MAX    50000
static TAILQ_HEAD(pf_tags, pf_tagname)  pf_tags =
    TAILQ_HEAD_INITIALIZER(pf_tags);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

static u_int16_t         tagname2tag(struct pf_tags *, char *);
static void              tag2tagname(struct pf_tags *, u_int16_t, char *);
static void              tag_unref(struct pf_tags *, u_int16_t);
static int               pf_rtlabel_add(struct pf_addr_wrap *);
static void              pf_rtlabel_remove(struct pf_addr_wrap *);
static void              pf_rtlabel_copyout(struct pf_addr_wrap *);

static int pf_inet_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);
static int pf_inet6_hook(struct ifnet *, struct mbuf **, int,
    struct ip_fw_args *);

#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

/*
 * Helper macros for ioctl structures which vary in size (32-bit vs. 64-bit)
 */
#define PFIOCX_STRUCT_DECL(s)                                           \
        struct {                                                        \
                union {                                                 \
                        struct s##_32   _s##_32;                        \
                        struct s##_64   _s##_64;                        \
                } _u;                                                   \
        } *s##_un = NULL

#define PFIOCX_STRUCT_BEGIN(a, s, _action) {                            \
        VERIFY(s##_un == NULL);                                         \
        s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO);    \
        if (s##_un == NULL) {                                           \
                _action                                                 \
        } else {                                                        \
                if (p64)                                                \
                        bcopy(a, &s##_un->_u._s##_64,                   \
                            sizeof (struct s##_64));                    \
                else                                                    \
                        bcopy(a, &s##_un->_u._s##_32,                   \
                            sizeof (struct s##_32));                    \
        }                                                               \
}

#define PFIOCX_STRUCT_END(s, a) {                                       \
        VERIFY(s##_un != NULL);                                         \
        if (p64)                                                        \
                bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64));  \
        else                                                            \
                bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32));  \
        _FREE(s##_un, M_TEMP);                                          \
        s##_un = NULL;                                                  \
}

#define PFIOCX_STRUCT_ADDR32(s)         (&s##_un->_u._s##_32)
#define PFIOCX_STRUCT_ADDR64(s)         (&s##_un->_u._s##_64)
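
/*
 * Usage sketch (illustration only): this is the shape in which the
 * PFIOCX_STRUCT_* macros are combined further down in pfioctl(), shown here
 * for the struct pfioc_tokens pair handled by DIOCGETSTARTERS.
 */
#if 0
        case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
                PFIOCX_STRUCT_DECL(pfioc_tokens);

                PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; );
                error = pfioctl_ioc_tokens(cmd,
                    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
                    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
                PFIOCX_STRUCT_END(pfioc_tokens, addr);
                break;
        }
#endif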
/*
 * Helper macros for regular ioctl structures.
 */
#define PFIOC_STRUCT_BEGIN(a, v, _action) {                             \
        VERIFY((v) == NULL);                                            \
        (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO);          \
        if ((v) == NULL) {                                              \
                _action                                                 \
        } else {                                                        \
                bcopy(a, v, sizeof (*(v)));                             \
        }                                                               \
}

#define PFIOC_STRUCT_END(v, a) {                                        \
        VERIFY((v) != NULL);                                            \
        bcopy(v, a, sizeof (*(v)));                                     \
        _FREE(v, M_TEMP);                                               \
        (v) = NULL;                                                     \
}

#define PFIOC_STRUCT_ADDR32(s)          (&s##_un->_u._s##_32)
#define PFIOC_STRUCT_ADDR64(s)          (&s##_un->_u._s##_64)
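
/*
 * Usage sketch (illustration only): the fixed-size counterpart, as used by
 * the DIOCADDRULE/DIOCGETRULE family in pfioctl() below.
 */
#if 0
        case DIOCGETRULE: {             /* struct pfioc_rule */
                struct pfioc_rule *pr = NULL;

                PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
                error = pfioctl_ioc_rule(cmd, minordev, pr, p);
                PFIOC_STRUCT_END(pr, addr);
                break;
        }
#endif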
static lck_attr_t *pf_perim_lock_attr;
static lck_grp_t *pf_perim_lock_grp;
static lck_grp_attr_t *pf_perim_lock_grp_attr;

static lck_attr_t *pf_lock_attr;
static lck_grp_t *pf_lock_grp;
static lck_grp_attr_t *pf_lock_grp_attr;

struct thread *pf_purge_thread;

extern void pfi_kifaddr_update(void *);

/* pf enable ref-counting helper functions */
static u_int64_t                generate_token(struct proc *);
static int                      remove_token(struct pfioc_remove_token *);
static void                     invalidate_all_tokens(void);
static u_int64_t
generate_token(struct proc *p)
{
        u_int64_t token_value;
        struct pfioc_kernel_token *new_token;

        if (nr_tokens + 1 > NR_TOKENS_LIMIT) {
                os_log_error(OS_LOG_DEFAULT, "%s: NR_TOKENS_LIMIT reached", __func__);
                return 0;
        }

        new_token = _MALLOC(sizeof(struct pfioc_kernel_token), M_TEMP,
            M_WAITOK | M_ZERO);

        LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

        if (new_token == NULL) {
                /* malloc failed! bail! */
                os_log_error(OS_LOG_DEFAULT, "%s: unable to allocate pf token structure!", __func__);
                return 0;
        }

        token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token);

        new_token->token.token_value = token_value;
        new_token->token.pid = proc_pid(p);
        proc_name(new_token->token.pid, new_token->token.proc_name,
            sizeof(new_token->token.proc_name));
        new_token->token.timestamp = pf_calendar_time_second();

        SLIST_INSERT_HEAD(&token_list_head, new_token, next);
        nr_tokens++;

        return token_value;
}
static int
remove_token(struct pfioc_remove_token *tok)
{
        struct pfioc_kernel_token *entry, *tmp;

        LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

        SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
                if (tok->token_value == entry->token.token_value) {
                        SLIST_REMOVE(&token_list_head, entry,
                            pfioc_kernel_token, next);
                        _FREE(entry, M_TEMP);
                        nr_tokens--;
                        return 0;    /* success */
                }
        }

        printf("pf : remove failure\n");
        return ESRCH;    /* failure */
}
static void
invalidate_all_tokens(void)
{
        struct pfioc_kernel_token *entry, *tmp;

        LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

        SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
                SLIST_REMOVE(&token_list_head, entry, pfioc_kernel_token, next);
                _FREE(entry, M_TEMP);
        }

        nr_tokens = 0;
}
void
pfinit(void)
{
        u_int32_t *t = pf_default_rule.timeout;
        int maj;

        pf_perim_lock_grp_attr = lck_grp_attr_alloc_init();
        pf_perim_lock_grp = lck_grp_alloc_init("pf_perim",
            pf_perim_lock_grp_attr);
        pf_perim_lock_attr = lck_attr_alloc_init();
        lck_rw_init(pf_perim_lock, pf_perim_lock_grp, pf_perim_lock_attr);

        pf_lock_grp_attr = lck_grp_attr_alloc_init();
        pf_lock_grp = lck_grp_alloc_init("pf", pf_lock_grp_attr);
        pf_lock_attr = lck_attr_alloc_init();
        lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr);

        pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
            NULL);
        pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
            "pfsrctrpl", NULL);
        pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
            NULL);
        pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
            "pfstatekeypl", NULL);
        pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0,
            "pfappstatepl", NULL);
        pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
            "pfpooladdrpl", NULL);

        pf_osfp_initialize();

        pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
            pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

        if (max_mem <= 256 * 1024 * 1024) {
                pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
                    PFR_KENTRY_HIWAT_SMALL;
        }

        RB_INIT(&tree_src_tracking);
        RB_INIT(&pf_anchors);
        pf_init_ruleset(&pf_main_ruleset);
        TAILQ_INIT(&pf_pabuf);
        TAILQ_INIT(&state_list);

        _CASSERT((SC_BE & SCIDX_MASK) == SCIDX_BE);
        _CASSERT((SC_BK_SYS & SCIDX_MASK) == SCIDX_BK_SYS);
        _CASSERT((SC_BK & SCIDX_MASK) == SCIDX_BK);
        _CASSERT((SC_RD & SCIDX_MASK) == SCIDX_RD);
        _CASSERT((SC_OAM & SCIDX_MASK) == SCIDX_OAM);
        _CASSERT((SC_AV & SCIDX_MASK) == SCIDX_AV);
        _CASSERT((SC_RV & SCIDX_MASK) == SCIDX_RV);
        _CASSERT((SC_VI & SCIDX_MASK) == SCIDX_VI);
        _CASSERT((SC_SIG & SCIDX_MASK) == SCIDX_SIG);
        _CASSERT((SC_VO & SCIDX_MASK) == SCIDX_VO);
        _CASSERT((SC_CTL & SCIDX_MASK) == SCIDX_CTL);

        /* default rule should never be garbage collected */
        pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
        pf_default_rule.action = PF_PASS;
        pf_default_rule.nr = -1;
        pf_default_rule.rtableid = IFSCOPE_NONE;

        /* initialize default timeouts */
        t[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
        t[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
        t[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
        t[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
        t[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
        t[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
        t[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
        t[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
        t[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
        t[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
        t[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
        t[PFTM_GREv1_FIRST_PACKET] = PFTM_GREv1_FIRST_PACKET_VAL;
        t[PFTM_GREv1_INITIATING] = PFTM_GREv1_INITIATING_VAL;
        t[PFTM_GREv1_ESTABLISHED] = PFTM_GREv1_ESTABLISHED_VAL;
        t[PFTM_ESP_FIRST_PACKET] = PFTM_ESP_FIRST_PACKET_VAL;
        t[PFTM_ESP_INITIATING] = PFTM_ESP_INITIATING_VAL;
        t[PFTM_ESP_ESTABLISHED] = PFTM_ESP_ESTABLISHED_VAL;
        t[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
        t[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
        t[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
        t[PFTM_FRAG] = PFTM_FRAG_VAL;
        t[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
        t[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
        t[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
        t[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
        t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

        bzero(&pf_status, sizeof(pf_status));
        pf_status.debug = PF_DEBUG_URGENT;
        pf_hash_seed = RandomULong();

        /* XXX do our best to avoid a conflict */
        pf_status.hostid = random();

        if (kernel_thread_start(pf_purge_thread_fn, NULL,
            &pf_purge_thread) != 0) {
                printf("%s: unable to start purge thread!", __func__);
                return;
        }

        maj = cdevsw_add(PF_CDEV_MAJOR, &pf_cdevsw);
        if (maj == -1) {
                printf("%s: failed to allocate major number!\n", __func__);
                return;
        }
        (void) devfs_make_node(makedev(maj, PFDEV_PF), DEVFS_CHAR,
            UID_ROOT, GID_WHEEL, 0600, "pf", 0);

        (void) devfs_make_node(makedev(maj, PFDEV_PFM), DEVFS_CHAR,
            UID_ROOT, GID_WHEEL, 0600, "pfm", 0);
}
static void
pfdetach(void)
{
        struct pf_anchor        *anchor;
        struct pf_state         *state;
        struct pf_src_node      *node;
        struct pfioc_table      pt;
        u_int32_t               ticket;
        int                     i;
        char                    r = '\0';

        pf_status.running = 0;
        wakeup(pf_purge_thread_fn);

        /* clear the rulesets */
        for (i = 0; i < PF_RULESET_MAX; i++) {
                if (pf_begin_rules(&ticket, i, &r) == 0) {
                        pf_commit_rules(ticket, i, &r);
                }
        }

        RB_FOREACH(state, pf_state_tree_id, &tree_id) {
                state->timeout = PFTM_PURGE;
                state->sync_flags = PFSTATE_NOSYNC;
        }

        pf_purge_expired_states(pf_status.states);

        pfsync_clear_states(pf_status.hostid, NULL);

        /* clear source nodes */
        RB_FOREACH(state, pf_state_tree_id, &tree_id) {
                state->src_node = NULL;
                state->nat_src_node = NULL;
        }
        RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
                node->expire = 1;
                node->states = 0;
        }
        pf_purge_expired_src_nodes();

        memset(&pt, '\0', sizeof(pt));
        pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags);

        /* destroy anchors */
        while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) {
                for (i = 0; i < PF_RULESET_MAX; i++) {
                        if (pf_begin_rules(&ticket, i, anchor->name) == 0) {
                                pf_commit_rules(ticket, i, anchor->name);
                        }
                }
        }

        /* destroy main ruleset */
        pf_remove_if_empty_ruleset(&pf_main_ruleset);

        /* destroy the pools */
        pool_destroy(&pf_pooladdr_pl);
        pool_destroy(&pf_state_pl);
        pool_destroy(&pf_rule_pl);
        pool_destroy(&pf_src_tree_pl);

        /* destroy subsystems */
        pf_normalize_destroy();
}
static int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
        if (minor(dev) >= PFDEV_MAX) {
                return ENXIO;
        }

        if (minor(dev) == PFDEV_PFM) {
                lck_mtx_lock(pf_lock);
                if (pfdevcnt != 0) {
                        lck_mtx_unlock(pf_lock);
                        return EBUSY;
                }
                pfdevcnt++;
                lck_mtx_unlock(pf_lock);
        }
        return 0;
}
static int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
#pragma unused(flags, fmt, p)
        if (minor(dev) >= PFDEV_MAX) {
                return ENXIO;
        }

        if (minor(dev) == PFDEV_PFM) {
                lck_mtx_lock(pf_lock);
                VERIFY(pfdevcnt > 0);
                pfdevcnt--;
                lck_mtx_unlock(pf_lock);
        }
        return 0;
}
static struct pf_pool *
pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket)
{
        struct pf_ruleset       *ruleset;
        struct pf_rule          *rule;
        int                      rs_num;

        ruleset = pf_find_ruleset(anchor);
        if (ruleset == NULL) {
                return NULL;
        }
        rs_num = pf_get_ruleset_number(rule_action);
        if (rs_num >= PF_RULESET_MAX) {
                return NULL;
        }
        if (active) {
                if (check_ticket && ticket !=
                    ruleset->rules[rs_num].active.ticket) {
                        return NULL;
                }
                if (r_last) {
                        rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
                            pf_rulequeue);
                } else {
                        rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
                }
        } else {
                if (check_ticket && ticket !=
                    ruleset->rules[rs_num].inactive.ticket) {
                        return NULL;
                }
                if (r_last) {
                        rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
                            pf_rulequeue);
                } else {
                        rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
                }
        }
        if (!r_last) {
                while ((rule != NULL) && (rule->nr != rule_number)) {
                        rule = TAILQ_NEXT(rule, entries);
                }
        }
        if (rule == NULL) {
                return NULL;
        }

        return &rule->rpool;
}
static void
pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
{
        struct pf_pooladdr      *mv_pool_pa;

        while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
                TAILQ_REMOVE(poola, mv_pool_pa, entries);
                TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
        }
}
static void
pf_empty_pool(struct pf_palist *poola)
{
        struct pf_pooladdr      *empty_pool_pa;

        while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
                pfi_dynaddr_remove(&empty_pool_pa->addr);
                pf_tbladdr_remove(&empty_pool_pa->addr);
                pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE);
                TAILQ_REMOVE(poola, empty_pool_pa, entries);
                pool_put(&pf_pooladdr_pl, empty_pool_pa);
        }
}
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
        if (rulequeue != NULL) {
                if (rule->states <= 0) {
                        /*
                         * XXX - we need to remove the table *before* detaching
                         * the rule to make sure the table code does not delete
                         * the anchor under our feet.
                         */
                        pf_tbladdr_remove(&rule->src.addr);
                        pf_tbladdr_remove(&rule->dst.addr);
                        if (rule->overload_tbl) {
                                pfr_detach_table(rule->overload_tbl);
                        }
                }
                TAILQ_REMOVE(rulequeue, rule, entries);
                rule->entries.tqe_prev = NULL;
        }

        if (rule->states > 0 || rule->src_nodes > 0 ||
            rule->entries.tqe_prev != NULL) {
                return;
        }
        pf_tag_unref(rule->tag);
        pf_tag_unref(rule->match_tag);
        pf_rtlabel_remove(&rule->src.addr);
        pf_rtlabel_remove(&rule->dst.addr);
        pfi_dynaddr_remove(&rule->src.addr);
        pfi_dynaddr_remove(&rule->dst.addr);
        if (rulequeue == NULL) {
                pf_tbladdr_remove(&rule->src.addr);
                pf_tbladdr_remove(&rule->dst.addr);
                if (rule->overload_tbl) {
                        pfr_detach_table(rule->overload_tbl);
                }
        }
        pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
        pf_anchor_remove(rule);
        pf_empty_pool(&rule->rpool.list);
        pool_put(&pf_rule_pl, rule);
}
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
        struct pf_tagname       *tag, *p = NULL;
        uint16_t                 new_tagid = 1;
        bool                     reserved_tag = false;

        TAILQ_FOREACH(tag, head, entries)
        if (strcmp(tagname, tag->name) == 0) {
                tag->ref++;
                return tag->tag;
        }

        /*
         * check if it is a reserved tag.
         */
        _CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX);
        for (int i = 0; i < NUM_RESERVED_TAGS; i++) {
                if (strncmp(tagname, pf_reserved_tag_table[i].tag_name,
                    PF_TAG_NAME_SIZE) == 0) {
                        new_tagid = pf_reserved_tag_table[i].tag_id;
                        reserved_tag = true;
                        goto skip_dynamic_tag_alloc;
                }
        }

        /*
         * to avoid fragmentation, we do a linear search from the beginning
         * and take the first free slot we find. if there is none or the list
         * is empty, append a new entry at the end.
         */
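
        /*
         * Worked example (illustrative, not from the original source): with
         * dynamic tags {1, 2, 4} already allocated, the scan below skips any
         * reserved tags (>= RESERVED_TAG_ID_MIN), walks 1 and 2 bumping
         * new_tagid to 3, stops at 4, and inserts the new entry with tag 3
         * before it.
         */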
        if (!TAILQ_EMPTY(head)) {
                /* skip reserved tags */
                for (p = TAILQ_FIRST(head); p != NULL &&
                    p->tag >= RESERVED_TAG_ID_MIN;
                    p = TAILQ_NEXT(p, entries)) {
                        ;
                }

                for (; p != NULL && p->tag == new_tagid;
                    p = TAILQ_NEXT(p, entries)) {
                        new_tagid = p->tag + 1;
                }
        }

        if (new_tagid > DYNAMIC_TAG_ID_MAX) {
                return 0;
        }

skip_dynamic_tag_alloc:
        /* allocate and fill new struct pf_tagname */
        tag = _MALLOC(sizeof(*tag), M_TEMP, M_WAITOK | M_ZERO);
        if (tag == NULL) {
                return 0;
        }
        strlcpy(tag->name, tagname, sizeof(tag->name));
        tag->tag = new_tagid;
        tag->ref++;

        if (reserved_tag) { /* insert reserved tag at the head */
                TAILQ_INSERT_HEAD(head, tag, entries);
        } else if (p != NULL) { /* insert new entry before p */
                TAILQ_INSERT_BEFORE(p, tag, entries);
        } else { /* either list empty or no free slot in between */
                TAILQ_INSERT_TAIL(head, tag, entries);
        }

        return tag->tag;
}

static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
        struct pf_tagname       *tag;

        TAILQ_FOREACH(tag, head, entries)
        if (tag->tag == tagid) {
                strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
                return;
        }
}

static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
        struct pf_tagname       *p, *next;

        if (tag == 0) {
                return;
        }

        for (p = TAILQ_FIRST(head); p != NULL; p = next) {
                next = TAILQ_NEXT(p, entries);
                if (tag == p->tag) {
                        if (--p->ref == 0) {
                                TAILQ_REMOVE(head, p, entries);
                                _FREE(p, M_TEMP);
                        }
                        break;
                }
        }
}

u_int16_t
pf_tagname2tag(char *tagname)
{
        return tagname2tag(&pf_tags, tagname);
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
        tag2tagname(&pf_tags, tagid, p);
}

void
pf_tag_ref(u_int16_t tag)
{
        struct pf_tagname *t;

        TAILQ_FOREACH(t, &pf_tags, entries)
        if (t->tag == tag) {
                break;
        }
        if (t != NULL) {
                t->ref++;
        }
}

void
pf_tag_unref(u_int16_t tag)
{
        tag_unref(&pf_tags, tag);
}

static int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
#pragma unused(a)
        return 0;
}

static void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
#pragma unused(a)
}

static void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
#pragma unused(a)
}
static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
        struct pf_ruleset       *rs;
        struct pf_rule          *rule;

        if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
                return EINVAL;
        }
        rs = pf_find_or_create_ruleset(anchor);
        if (rs == NULL) {
                return EINVAL;
        }
        while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
                pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
                rs->rules[rs_num].inactive.rcount--;
        }
        *ticket = ++rs->rules[rs_num].inactive.ticket;
        rs->rules[rs_num].inactive.open = 1;
        return 0;
}
static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
        struct pf_ruleset       *rs;
        struct pf_rule          *rule;

        if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
                return EINVAL;
        }
        rs = pf_find_ruleset(anchor);
        if (rs == NULL || !rs->rules[rs_num].inactive.open ||
            rs->rules[rs_num].inactive.ticket != ticket) {
                return 0;
        }
        while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
                pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
                rs->rules[rs_num].inactive.rcount--;
        }
        rs->rules[rs_num].inactive.open = 0;
        return 0;
}
#define PF_MD5_UPD(st, elm)                                             \
        MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm))

#define PF_MD5_UPD_STR(st, elm)                                         \
        MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {                            \
        (stor) = htonl((st)->elm);                                      \
        MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t));        \
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {                            \
        (stor) = htons((st)->elm);                                      \
        MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t));        \
} while (0)
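
/*
 * Expansion note (illustrative): PF_MD5_UPD(rule, af) becomes
 * MD5Update(ctx, (u_int8_t *)&(rule)->af, sizeof((rule)->af)), while the
 * HTONL/HTONS variants first byte-swap the field into the scratch variable
 * so the resulting ruleset checksum does not depend on host endianness.
 */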
static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr, u_int8_t proto)
{
        PF_MD5_UPD(pfr, addr.type);
        switch (pfr->addr.type) {
        case PF_ADDR_DYNIFTL:
                PF_MD5_UPD(pfr, addr.v.ifname);
                PF_MD5_UPD(pfr, addr.iflags);
                break;
        case PF_ADDR_TABLE:
                PF_MD5_UPD(pfr, addr.v.tblname);
                break;
        case PF_ADDR_ADDRMASK:
                /* XXX ignore af? */
                PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
                PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
                break;
        case PF_ADDR_RTLABEL:
                PF_MD5_UPD(pfr, addr.v.rtlabelname);
                break;
        }

        switch (proto) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
                PF_MD5_UPD(pfr, xport.range.port[0]);
                PF_MD5_UPD(pfr, xport.range.port[1]);
                PF_MD5_UPD(pfr, xport.range.op);
                break;
        default:
                break;
        }

        PF_MD5_UPD(pfr, neg);
}

static void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
        u_int16_t x;
        u_int32_t y;

        pf_hash_rule_addr(ctx, &rule->src, rule->proto);
        pf_hash_rule_addr(ctx, &rule->dst, rule->proto);
        PF_MD5_UPD_STR(rule, label);
        PF_MD5_UPD_STR(rule, ifname);
        PF_MD5_UPD_STR(rule, match_tagname);
        PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
        PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
        PF_MD5_UPD_HTONL(rule, prob, y);
        PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
        PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
        PF_MD5_UPD(rule, uid.op);
        PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
        PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
        PF_MD5_UPD(rule, gid.op);
        PF_MD5_UPD_HTONL(rule, rule_flag, y);
        PF_MD5_UPD(rule, action);
        PF_MD5_UPD(rule, direction);
        PF_MD5_UPD(rule, af);
        PF_MD5_UPD(rule, quick);
        PF_MD5_UPD(rule, ifnot);
        PF_MD5_UPD(rule, match_tag_not);
        PF_MD5_UPD(rule, natpass);
        PF_MD5_UPD(rule, keep_state);
        PF_MD5_UPD(rule, proto);
        PF_MD5_UPD(rule, type);
        PF_MD5_UPD(rule, code);
        PF_MD5_UPD(rule, flags);
        PF_MD5_UPD(rule, flagset);
        PF_MD5_UPD(rule, allow_opts);
        PF_MD5_UPD(rule, rt);
        PF_MD5_UPD(rule, tos);
}
static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
        struct pf_ruleset       *rs;
        struct pf_rule          *rule, **old_array, *r;
        struct pf_rulequeue     *old_rules;
        int                      error;
        u_int32_t                old_rcount;

        LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

        if (rs_num < 0 || rs_num >= PF_RULESET_MAX) {
                return EINVAL;
        }
        rs = pf_find_ruleset(anchor);
        if (rs == NULL || !rs->rules[rs_num].inactive.open ||
            ticket != rs->rules[rs_num].inactive.ticket) {
                return EBUSY;
        }

        /* Calculate checksum for the main ruleset */
        if (rs == &pf_main_ruleset) {
                error = pf_setup_pfsync_matching(rs);
                if (error != 0) {
                        return error;
                }
        }

        /* Swap rules, keep the old. */
        old_rules = rs->rules[rs_num].active.ptr;
        old_rcount = rs->rules[rs_num].active.rcount;
        old_array = rs->rules[rs_num].active.ptr_array;

        if (old_rcount != 0) {
                r = TAILQ_FIRST(rs->rules[rs_num].active.ptr);
                while (r) {
                        if (r->rule_flag & PFRULE_PFM) {
                                pffwrules--;
                        }
                        r = TAILQ_NEXT(r, entries);
                }
        }

        rs->rules[rs_num].active.ptr =
            rs->rules[rs_num].inactive.ptr;
        rs->rules[rs_num].active.ptr_array =
            rs->rules[rs_num].inactive.ptr_array;
        rs->rules[rs_num].active.rcount =
            rs->rules[rs_num].inactive.rcount;
        rs->rules[rs_num].inactive.ptr = old_rules;
        rs->rules[rs_num].inactive.ptr_array = old_array;
        rs->rules[rs_num].inactive.rcount = old_rcount;

        rs->rules[rs_num].active.ticket =
            rs->rules[rs_num].inactive.ticket;
        pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

        /* Purge the old rule list. */
        while ((rule = TAILQ_FIRST(old_rules)) != NULL) {
                pf_rm_rule(old_rules, rule);
        }
        if (rs->rules[rs_num].inactive.ptr_array) {
                _FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
        }
        rs->rules[rs_num].inactive.ptr_array = NULL;
        rs->rules[rs_num].inactive.rcount = 0;
        rs->rules[rs_num].inactive.open = 0;
        pf_remove_if_empty_ruleset(rs);
        return 0;
}
static void
pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p,
    int minordev)
{
        bcopy(src, dst, sizeof(struct pf_rule));

        dst->label[sizeof(dst->label) - 1] = '\0';
        dst->ifname[sizeof(dst->ifname) - 1] = '\0';
        dst->qname[sizeof(dst->qname) - 1] = '\0';
        dst->pqname[sizeof(dst->pqname) - 1] = '\0';
        dst->tagname[sizeof(dst->tagname) - 1] = '\0';
        dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0';
        dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0';
        dst->owner[sizeof(dst->owner) - 1] = '\0';

        dst->cuid = kauth_cred_getuid(p->p_ucred);
        dst->cpid = p->p_pid;

        dst->overload_tbl = NULL;

        TAILQ_INIT(&dst->rpool.list);
        dst->rpool.cur = NULL;

        /* initialize refcounting */
        dst->states = 0;
        dst->src_nodes = 0;

        dst->entries.tqe_prev = NULL;
        dst->entries.tqe_next = NULL;
        if ((uint8_t)minordev == PFDEV_PFM) {
                dst->rule_flag |= PFRULE_PFM;
        }
}

static void
pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst)
{
        bcopy(src, dst, sizeof(struct pf_rule));

        dst->overload_tbl = NULL;

        dst->rpool.list.tqh_first = NULL;
        dst->rpool.list.tqh_last = NULL;
        dst->rpool.cur = NULL;

        dst->entries.tqe_prev = NULL;
        dst->entries.tqe_next = NULL;
}
static void
pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
        uint64_t secs = pf_time_second();
        bzero(sp, sizeof(struct pfsync_state));

        /* copy from state key */
        sp->lan.addr = sk->lan.addr;
        sp->lan.xport = sk->lan.xport;
        sp->gwy.addr = sk->gwy.addr;
        sp->gwy.xport = sk->gwy.xport;
        sp->ext_lan.addr = sk->ext_lan.addr;
        sp->ext_lan.xport = sk->ext_lan.xport;
        sp->ext_gwy.addr = sk->ext_gwy.addr;
        sp->ext_gwy.xport = sk->ext_gwy.xport;
        sp->proto_variant = sk->proto_variant;

        sp->proto = sk->proto;
        sp->af_lan = sk->af_lan;
        sp->af_gwy = sk->af_gwy;
        sp->direction = sk->direction;
        sp->flowhash = sk->flowhash;

        /* copy from state */
        memcpy(&sp->id, &s->id, sizeof(sp->id));
        sp->creatorid = s->creatorid;
        strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname));
        pf_state_peer_to_pfsync(&s->src, &sp->src);
        pf_state_peer_to_pfsync(&s->dst, &sp->dst);

        sp->rule = s->rule.ptr->nr;
        sp->nat_rule = (s->nat_rule.ptr == NULL) ?
            (unsigned)-1 : s->nat_rule.ptr->nr;
        sp->anchor = (s->anchor.ptr == NULL) ?
            (unsigned)-1 : s->anchor.ptr->nr;

        pf_state_counter_to_pfsync(s->bytes[0], sp->bytes[0]);
        pf_state_counter_to_pfsync(s->bytes[1], sp->bytes[1]);
        pf_state_counter_to_pfsync(s->packets[0], sp->packets[0]);
        pf_state_counter_to_pfsync(s->packets[1], sp->packets[1]);
        sp->creation = secs - s->creation;
        sp->expire = pf_state_expires(s);

        sp->allow_opts = s->allow_opts;
        sp->timeout = s->timeout;

        if (s->src_node) {
                sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
        }
        if (s->nat_src_node) {
                sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
        }

        if (sp->expire > secs) {
                sp->expire -= secs;
        } else {
                sp->expire = 0;
        }
}
static void
pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk,
    struct pf_state *s)
{
        /* copy to state key */
        sk->lan.addr = sp->lan.addr;
        sk->lan.xport = sp->lan.xport;
        sk->gwy.addr = sp->gwy.addr;
        sk->gwy.xport = sp->gwy.xport;
        sk->ext_lan.addr = sp->ext_lan.addr;
        sk->ext_lan.xport = sp->ext_lan.xport;
        sk->ext_gwy.addr = sp->ext_gwy.addr;
        sk->ext_gwy.xport = sp->ext_gwy.xport;
        sk->proto_variant = sp->proto_variant;

        sk->proto = sp->proto;
        sk->af_lan = sp->af_lan;
        sk->af_gwy = sp->af_gwy;
        sk->direction = sp->direction;
        sk->flowhash = pf_calc_state_key_flowhash(sk);

        memcpy(&s->id, &sp->id, sizeof(sp->id));
        s->creatorid = sp->creatorid;
        pf_state_peer_from_pfsync(&sp->src, &s->src);
        pf_state_peer_from_pfsync(&sp->dst, &s->dst);

        s->rule.ptr = &pf_default_rule;
        s->nat_rule.ptr = NULL;
        s->anchor.ptr = NULL;

        s->creation = pf_time_second();
        s->expire = pf_time_second();
        if (sp->expire > 0) {
                s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire;
        }

        s->packets[0] = s->packets[1] = 0;
        s->bytes[0] = s->bytes[1] = 0;
}
static void
pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
        bcopy(src, dst, sizeof(struct pf_pooladdr));

        dst->entries.tqe_prev = NULL;
        dst->entries.tqe_next = NULL;
        dst->ifname[sizeof(dst->ifname) - 1] = '\0';
}

static void
pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst)
{
        bcopy(src, dst, sizeof(struct pf_pooladdr));

        dst->entries.tqe_prev = NULL;
        dst->entries.tqe_next = NULL;
}
static int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
        MD5_CTX                  ctx;
        struct pf_rule          *rule;
        int                      rs_cnt;
        u_int8_t                 digest[PF_MD5_DIGEST_LENGTH];

        MD5Init(&ctx);
        for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
                /* XXX PF_RULESET_SCRUB as well? */
                if (rs_cnt == PF_RULESET_SCRUB) {
                        continue;
                }

                if (rs->rules[rs_cnt].inactive.ptr_array) {
                        _FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
                }
                rs->rules[rs_cnt].inactive.ptr_array = NULL;

                if (rs->rules[rs_cnt].inactive.rcount) {
                        rs->rules[rs_cnt].inactive.ptr_array =
                            _MALLOC(sizeof(caddr_t) *
                            rs->rules[rs_cnt].inactive.rcount,
                            M_TEMP, M_WAITOK);

                        if (!rs->rules[rs_cnt].inactive.ptr_array) {
                                return ENOMEM;
                        }
                }

                TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
                    entries) {
                        pf_hash_rule(&ctx, rule);
                        (rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
                }
        }

        MD5Final(digest, &ctx);
        memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
        return 0;
}
static void
pf_start(void)
{
        LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

        VERIFY(pf_is_enabled == 0);

        pf_is_enabled = 1;
        pf_status.running = 1;
        pf_status.since = pf_calendar_time_second();
        if (pf_status.stateid == 0) {
                pf_status.stateid = pf_time_second();
                pf_status.stateid = pf_status.stateid << 32;
        }
        wakeup(pf_purge_thread_fn);
        DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
}

static void
pf_stop(void)
{
        LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

        VERIFY(pf_is_enabled);

        pf_status.running = 0;
        pf_is_enabled = 0;
        pf_status.since = pf_calendar_time_second();
        wakeup(pf_purge_thread_fn);
        DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
}
static int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
        int p64 = proc_is64bit(p);
        int error = 0;
        int minordev = minor(dev);

        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
                return EPERM;
        }

        /* XXX keep in sync with switch() below */
        if (securelevel > 1) {
                switch (cmd) {
                case DIOCSETSTATUSIF:
                case DIOCINSERTRULE:
                case DIOCDELETERULE:
                case DIOCGETTIMEOUT:
                case DIOCCLRRULECTRS:
                case DIOCGETRULESETS:
                case DIOCGETRULESET:
                case DIOCRGETTABLES:
                case DIOCRGETTSTATS:
                case DIOCRCLRTSTATS:
                case DIOCRGETASTATS:
                case DIOCRCLRASTATS:
                case DIOCGETSRCNODES:
                case DIOCCLRSRCNODES:
                case DIOCIGETIFACES:
                case DIOCRCLRTABLES:
                case DIOCRADDTABLES:
                case DIOCRDELTABLES:
                case DIOCRSETTFLAGS: {
                        int pfrio_flags;

                        bcopy(&((struct pfioc_table *)(void *)addr)->
                            pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

                        if (pfrio_flags & PFR_FLAG_DUMMY) {
                                break; /* dummy operation ok */
                        }
                        return EPERM;
                }
                default:
                        return EPERM;
                }
        }

        if (!(flags & FWRITE)) {
                switch (cmd) {
                case DIOCGETSTARTERS:
                case DIOCINSERTRULE:
                case DIOCDELETERULE:
                case DIOCGETTIMEOUT:
                case DIOCGETRULESETS:
                case DIOCGETRULESET:
                case DIOCRGETTABLES:
                case DIOCRGETTSTATS:
                case DIOCRGETASTATS:
                case DIOCGETSRCNODES:
                case DIOCIGETIFACES:
                        break;
                case DIOCRCLRTABLES:
                case DIOCRADDTABLES:
                case DIOCRDELTABLES:
                case DIOCRCLRTSTATS:
                case DIOCRSETTFLAGS: {
                        int pfrio_flags;

                        bcopy(&((struct pfioc_table *)(void *)addr)->
                            pfrio_flags, &pfrio_flags, sizeof(pfrio_flags));

                        if (pfrio_flags & PFR_FLAG_DUMMY) {
                                flags |= FWRITE; /* need write lock for dummy */
                                break; /* dummy operation ok */
                        }
                        return EACCES;
                }
                case DIOCGETRULE: {
                        u_int32_t action;

                        bcopy(&((struct pfioc_rule *)(void *)addr)->action,
                            &action, sizeof(action));

                        if (action == PF_GET_CLR_CNTR) {
                                return EACCES;
                        }
                        break;
                }
                default:
                        return EACCES;
                }
        }

        if (flags & FWRITE) {
                lck_rw_lock_exclusive(pf_perim_lock);
        } else {
                lck_rw_lock_shared(pf_perim_lock);
        }

        lck_mtx_lock(pf_lock);

        switch (cmd) {
        case DIOCSTART:
                if (pf_status.running) {
                        /*
                         * Increment the reference for a simple -e enable, so
                         * that even if other processes drop their references,
                         * pf will still be available to processes that turned
                         * it on without taking a reference
                         */
                        if (nr_tokens == pf_enabled_ref_count) {
                                pf_enabled_ref_count++;
                                VERIFY(pf_enabled_ref_count != 0);
                        }
                        error = EEXIST;
                } else if (pf_purge_thread == NULL) {
                        error = ENOMEM;
                } else {
                        pf_start();
                        pf_enabled_ref_count++;
                        VERIFY(pf_enabled_ref_count != 0);
                }
                break;

        case DIOCSTARTREF:              /* u_int64_t */
                if (pf_purge_thread == NULL) {
                        error = ENOMEM;
                } else {
                        u_int64_t token;

                        /* small enough to be on stack */
                        if ((token = generate_token(p)) != 0) {
                                if (pf_is_enabled == 0) {
                                        pf_start();
                                }
                                pf_enabled_ref_count++;
                                VERIFY(pf_enabled_ref_count != 0);
                        } else {
                                error = ENOMEM;
                                DPFPRINTF(PF_DEBUG_URGENT,
                                    ("pf: unable to generate token\n"));
                        }
                        bcopy(&token, addr, sizeof(token));
                }
                break;

        case DIOCSTOP:
                if (!pf_status.running) {
                        error = ENOENT;
                } else {
                        pf_stop();
                        pf_enabled_ref_count = 0;
                        invalidate_all_tokens();
                }
                break;

        case DIOCSTOPREF:               /* struct pfioc_remove_token */
                if (!pf_status.running) {
                        error = ENOENT;
                } else {
                        struct pfioc_remove_token pfrt;

                        /* small enough to be on stack */
                        bcopy(addr, &pfrt, sizeof(pfrt));
                        if ((error = remove_token(&pfrt)) == 0) {
                                VERIFY(pf_enabled_ref_count != 0);
                                pf_enabled_ref_count--;
                                /* return currently held references */
                                pfrt.refcount = pf_enabled_ref_count;
                                DPFPRINTF(PF_DEBUG_MISC,
                                    ("pf: enabled refcount decremented\n"));
                        } else {
                                DPFPRINTF(PF_DEBUG_URGENT,
                                    ("pf: token mismatch\n"));
                        }
                        bcopy(&pfrt, addr, sizeof(pfrt));

                        if (error == 0 && pf_enabled_ref_count == 0) {
                                pf_stop();
                        }
                }
                break;

        case DIOCGETSTARTERS: {         /* struct pfioc_tokens */
                PFIOCX_STRUCT_DECL(pfioc_tokens);

                PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; );
                error = pfioctl_ioc_tokens(cmd,
                    PFIOCX_STRUCT_ADDR32(pfioc_tokens),
                    PFIOCX_STRUCT_ADDR64(pfioc_tokens), p);
                PFIOCX_STRUCT_END(pfioc_tokens, addr);
                break;
        }

        case DIOCADDRULE:               /* struct pfioc_rule */
        case DIOCGETRULES:              /* struct pfioc_rule */
        case DIOCGETRULE:               /* struct pfioc_rule */
        case DIOCCHANGERULE:            /* struct pfioc_rule */
        case DIOCINSERTRULE:            /* struct pfioc_rule */
        case DIOCDELETERULE: {          /* struct pfioc_rule */
                struct pfioc_rule *pr = NULL;

                PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
                error = pfioctl_ioc_rule(cmd, minordev, pr, p);
                PFIOC_STRUCT_END(pr, addr);
                break;
        }

        case DIOCCLRSTATES:             /* struct pfioc_state_kill */
        case DIOCKILLSTATES: {          /* struct pfioc_state_kill */
                struct pfioc_state_kill *psk = NULL;

                PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break; );
                error = pfioctl_ioc_state_kill(cmd, psk, p);
                PFIOC_STRUCT_END(psk, addr);
                break;
        }

        case DIOCADDSTATE:              /* struct pfioc_state */
        case DIOCGETSTATE: {            /* struct pfioc_state */
                struct pfioc_state *ps = NULL;

                PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break; );
                error = pfioctl_ioc_state(cmd, ps, p);
                PFIOC_STRUCT_END(ps, addr);
                break;
        }

        case DIOCGETSTATES: {           /* struct pfioc_states */
                PFIOCX_STRUCT_DECL(pfioc_states);

                PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break; );
                error = pfioctl_ioc_states(cmd,
                    PFIOCX_STRUCT_ADDR32(pfioc_states),
                    PFIOCX_STRUCT_ADDR64(pfioc_states), p);
                PFIOCX_STRUCT_END(pfioc_states, addr);
                break;
        }

        case DIOCGETSTATUS: {           /* struct pf_status */
                struct pf_status *s = NULL;

                PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break; );
                pfi_update_status(s->ifname, s);
                PFIOC_STRUCT_END(s, addr);
                break;
        }

        case DIOCSETSTATUSIF: {         /* struct pfioc_if */
                struct pfioc_if *pi = (struct pfioc_if *)(void *)addr;

                /* OK for unaligned accesses */
                if (pi->ifname[0] == 0) {
                        bzero(pf_status.ifname, IFNAMSIZ);
                        break;
                }
                strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
                break;
        }

        case DIOCCLRSTATUS: {
                bzero(pf_status.counters, sizeof(pf_status.counters));
                bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
                bzero(pf_status.scounters, sizeof(pf_status.scounters));
                pf_status.since = pf_calendar_time_second();
                if (*pf_status.ifname) {
                        pfi_update_status(pf_status.ifname, NULL);
                }
                break;
        }

        case DIOCNATLOOK: {             /* struct pfioc_natlook */
                struct pfioc_natlook *pnl = NULL;

                PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break; );
                error = pfioctl_ioc_natlook(cmd, pnl, p);
                PFIOC_STRUCT_END(pnl, addr);
                break;
        }

        case DIOCSETTIMEOUT:            /* struct pfioc_tm */
        case DIOCGETTIMEOUT: {          /* struct pfioc_tm */
                struct pfioc_tm pt;

                /* small enough to be on stack */
                bcopy(addr, &pt, sizeof(pt));
                error = pfioctl_ioc_tm(cmd, &pt, p);
                bcopy(&pt, addr, sizeof(pt));
                break;
        }

        case DIOCGETLIMIT:              /* struct pfioc_limit */
        case DIOCSETLIMIT: {            /* struct pfioc_limit */
                struct pfioc_limit pl;

                /* small enough to be on stack */
                bcopy(addr, &pl, sizeof(pl));
                error = pfioctl_ioc_limit(cmd, &pl, p);
                bcopy(&pl, addr, sizeof(pl));
                break;
        }

        case DIOCSETDEBUG: {            /* u_int32_t */
                bcopy(addr, &pf_status.debug, sizeof(u_int32_t));
                break;
        }

        case DIOCCLRRULECTRS: {
                /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
                struct pf_ruleset       *ruleset = &pf_main_ruleset;
                struct pf_rule          *rule;

                TAILQ_FOREACH(rule,
                    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
                        rule->evaluations = 0;
                        rule->packets[0] = rule->packets[1] = 0;
                        rule->bytes[0] = rule->bytes[1] = 0;
                }
                break;
        }

        case DIOCGIFSPEED: {
                struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
                struct pf_ifspeed ps;
                struct ifnet *ifp;
                u_int64_t baudrate;

                if (psp->ifname[0] != '\0') {
                        /* Can we completely trust user-land? */
                        strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
                        ps.ifname[IFNAMSIZ - 1] = '\0';
                        ifp = ifunit(ps.ifname);
                        if (ifp != NULL) {
                                baudrate = ifp->if_output_bw.max_bw;
                                bcopy(&baudrate, &psp->baudrate,
                                    sizeof(baudrate));
                        } else {
                                error = EINVAL;
                        }
                } else {
                        error = EINVAL;
                }
                break;
        }

        case DIOCBEGINADDRS:            /* struct pfioc_pooladdr */
        case DIOCADDADDR:               /* struct pfioc_pooladdr */
        case DIOCGETADDRS:              /* struct pfioc_pooladdr */
        case DIOCGETADDR:               /* struct pfioc_pooladdr */
        case DIOCCHANGEADDR: {          /* struct pfioc_pooladdr */
                struct pfioc_pooladdr *pp = NULL;

                PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break; )
                error = pfioctl_ioc_pooladdr(cmd, pp, p);
                PFIOC_STRUCT_END(pp, addr);
                break;
        }

        case DIOCGETRULESETS:           /* struct pfioc_ruleset */
        case DIOCGETRULESET: {          /* struct pfioc_ruleset */
                struct pfioc_ruleset *pr = NULL;

                PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; );
                error = pfioctl_ioc_ruleset(cmd, pr, p);
                PFIOC_STRUCT_END(pr, addr);
                break;
        }

        case DIOCRCLRTABLES:            /* struct pfioc_table */
        case DIOCRADDTABLES:            /* struct pfioc_table */
        case DIOCRDELTABLES:            /* struct pfioc_table */
        case DIOCRGETTABLES:            /* struct pfioc_table */
        case DIOCRGETTSTATS:            /* struct pfioc_table */
        case DIOCRCLRTSTATS:            /* struct pfioc_table */
        case DIOCRSETTFLAGS:            /* struct pfioc_table */
        case DIOCRCLRADDRS:             /* struct pfioc_table */
        case DIOCRADDADDRS:             /* struct pfioc_table */
        case DIOCRDELADDRS:             /* struct pfioc_table */
        case DIOCRSETADDRS:             /* struct pfioc_table */
        case DIOCRGETADDRS:             /* struct pfioc_table */
        case DIOCRGETASTATS:            /* struct pfioc_table */
        case DIOCRCLRASTATS:            /* struct pfioc_table */
        case DIOCRTSTADDRS:             /* struct pfioc_table */
        case DIOCRINADEFINE: {          /* struct pfioc_table */
                PFIOCX_STRUCT_DECL(pfioc_table);

                PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break; );
                error = pfioctl_ioc_table(cmd,
                    PFIOCX_STRUCT_ADDR32(pfioc_table),
                    PFIOCX_STRUCT_ADDR64(pfioc_table), p);
                PFIOCX_STRUCT_END(pfioc_table
, addr
); 
1845         case DIOCOSFPADD
:               /* struct pf_osfp_ioctl */ 
1846         case DIOCOSFPGET
: {             /* struct pf_osfp_ioctl */ 
1847                 struct pf_osfp_ioctl 
*io 
= NULL
; 
1849                 PFIOC_STRUCT_BEGIN(addr
, io
, error 
= ENOMEM
; break; ); 
1850                 if (cmd 
== DIOCOSFPADD
) { 
1851                         error 
= pf_osfp_add(io
); 
1853                         VERIFY(cmd 
== DIOCOSFPGET
); 
1854                         error 
= pf_osfp_get(io
); 
1856                 PFIOC_STRUCT_END(io
, addr
); 
1860         case DIOCXBEGIN
:                /* struct pfioc_trans */ 
1861         case DIOCXROLLBACK
:             /* struct pfioc_trans */ 
1862         case DIOCXCOMMIT
: {             /* struct pfioc_trans */ 
1863                 PFIOCX_STRUCT_DECL(pfioc_trans
); 
1865                 PFIOCX_STRUCT_BEGIN(addr
, pfioc_trans
, error 
= ENOMEM
; break; ); 
1866                 error 
= pfioctl_ioc_trans(cmd
, 
1867                     PFIOCX_STRUCT_ADDR32(pfioc_trans
), 
1868                     PFIOCX_STRUCT_ADDR64(pfioc_trans
), p
); 
1869                 PFIOCX_STRUCT_END(pfioc_trans
, addr
); 
1873         case DIOCGETSRCNODES
: {         /* struct pfioc_src_nodes */ 
1874                 PFIOCX_STRUCT_DECL(pfioc_src_nodes
); 
1876                 PFIOCX_STRUCT_BEGIN(addr
, pfioc_src_nodes
, 
1877                     error 
= ENOMEM
; break; ); 
1878                 error 
= pfioctl_ioc_src_nodes(cmd
, 
1879                     PFIOCX_STRUCT_ADDR32(pfioc_src_nodes
), 
1880                     PFIOCX_STRUCT_ADDR64(pfioc_src_nodes
), p
); 
1881                 PFIOCX_STRUCT_END(pfioc_src_nodes
, addr
); 
1885         case DIOCCLRSRCNODES
: { 
1886                 struct pf_src_node      
*n
; 
1887                 struct pf_state         
*state
; 
1889                 RB_FOREACH(state
, pf_state_tree_id
, &tree_id
) { 
1890                         state
->src_node 
= NULL
; 
1891                         state
->nat_src_node 
= NULL
; 
1893                 RB_FOREACH(n
, pf_src_tree
, &tree_src_tracking
) { 
1897                 pf_purge_expired_src_nodes(); 
1898                 pf_status
.src_nodes 
= 0; 
1902         case DIOCKILLSRCNODES
: {        /* struct pfioc_src_node_kill */ 
1903                 struct pfioc_src_node_kill 
*psnk 
= NULL
; 
1905                 PFIOC_STRUCT_BEGIN(addr
, psnk
, error 
= ENOMEM
; break; ); 
1906                 error 
= pfioctl_ioc_src_node_kill(cmd
, psnk
, p
); 
1907                 PFIOC_STRUCT_END(psnk
, addr
); 
1911         case DIOCSETHOSTID
: {           /* u_int32_t */ 
1914                 /* small enough to be on stack */ 
1915                 bcopy(addr
, &hid
, sizeof(hid
)); 
1917                         pf_status
.hostid 
= random(); 
1919                         pf_status
.hostid 
= hid
; 
1928         case DIOCIGETIFACES
:            /* struct pfioc_iface */ 
1929         case DIOCSETIFFLAG
:             /* struct pfioc_iface */ 
1930         case DIOCCLRIFFLAG
: {           /* struct pfioc_iface */ 
1931                 PFIOCX_STRUCT_DECL(pfioc_iface
); 
1933                 PFIOCX_STRUCT_BEGIN(addr
, pfioc_iface
, error 
= ENOMEM
; break; ); 
1934                 error 
= pfioctl_ioc_iface(cmd
, 
1935                     PFIOCX_STRUCT_ADDR32(pfioc_iface
), 
1936                     PFIOCX_STRUCT_ADDR64(pfioc_iface
), p
); 
1937                 PFIOCX_STRUCT_END(pfioc_iface
, addr
); 
1946         lck_mtx_unlock(pf_lock
); 
1947         lck_rw_done(pf_perim_lock
); 
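/*
 * Illustrative sketch (not part of the kernel build): roughly how a
 * privileged user-space tool reaches the handlers above.  Each DIOC*
 * command is issued with ioctl(2) against the /dev/pf character device;
 * the kernel copies the argument structure in, dispatches on `cmd' in
 * pfioctl() as shown above, and copies the result back out.  Field names
 * follow <net/pfvar.h>; error handling is mostly elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/pfvar.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void
 *	show_pf_status(void)
 *	{
 *		struct pf_status st;
 *		int dev = open("/dev/pf", O_RDONLY);	// requires root
 *
 *		if (dev < 0 || ioctl(dev, DIOCGETSTATUS, &st) == -1) {
 *			return;
 *		}
 *		printf("running=%u states=%u since=%llu\n",
 *		    st.running, st.states, (unsigned long long)st.since);
 *		close(dev);
 *	}
 */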
static int
pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
    struct pfioc_table_64 *io64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	if (!p64) {
		goto struct32;
	}

#ifdef __LP64__
	/*
	 * 64-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io64->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
		    io64->pfrio_setflag, io64->pfrio_clrflag,
		    &io64->pfrio_nchange, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io64->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io64->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io64->pfrio_table);
		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}
	goto done;
#else
#pragma unused(io64)
#endif /* __LP64__ */

struct32:
	/*
	 * 32-bit structure processing
	 */
	switch (cmd) {
	case DIOCRCLRTABLES:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTABLES:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_tstats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRTSTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETTFLAGS:
		if (io32->pfrio_esize != sizeof(struct pfr_table)) {
			error = ENODEV;
			break;
		}
		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
		    io32->pfrio_setflag, io32->pfrio_clrflag,
		    &io32->pfrio_nchange, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRADDRS:
		if (io32->pfrio_esize != 0) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRADDADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRDELADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRSETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL, 0);
		break;

	case DIOCRGETADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRGETASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_astats)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	case DIOCRCLRASTATS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRTSTADDRS:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
		    PFR_FLAG_USERIOCTL);
		break;

	case DIOCRINADEFINE:
		if (io32->pfrio_esize != sizeof(struct pfr_addr)) {
			error = ENODEV;
			break;
		}
		pfr_table_copyin_cleanup(&io32->pfrio_table);
		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

done:
	return error;
}
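/*
 * Illustrative sketch (user space, not compiled here): the pfrio_esize
 * checks above are what a caller of the DIOCR* table commands must
 * satisfy.  pfrio_esize names the element size of pfrio_buffer and
 * pfrio_size the element count; on return the kernel rewrites pfrio_size
 * with the number of elements available, so callers usually retry with a
 * larger buffer when it grows.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/pfvar.h>
 *	#include <string.h>
 *
 *	static int
 *	list_tables(int dev, struct pfr_table *buf, int nelem)
 *	{
 *		struct pfioc_table io;
 *
 *		memset(&io, 0, sizeof(io));
 *		io.pfrio_esize = sizeof(struct pfr_table);	// must match
 *		io.pfrio_buffer = buf;
 *		io.pfrio_size = nelem;
 *		if (ioctl(dev, DIOCRGETTABLES, &io) == -1) {
 *			return -1;
 *		}
 *		return io.pfrio_size;		// elements the kernel has
 *	}
 */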
static int
pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
    struct pfioc_tokens_64 *tok64, struct proc *p)
{
	struct pfioc_token *tokens;
	struct pfioc_kernel_token *entry, *tmp;
	user_addr_t token_buf;
	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
	void *ptr;

	switch (cmd) {
	case DIOCGETSTARTERS: {
		int size;

		if (nr_tokens == 0) {
			error = ENOENT;
			break;
		}

		size = sizeof(struct pfioc_token) * nr_tokens;
		if (size / nr_tokens != sizeof(struct pfioc_token)) {
			os_log_error(OS_LOG_DEFAULT, "%s: size overflows", __func__);
			error = ERANGE;
			break;
		}
		ocnt = cnt = (p64 ? tok64->size : tok32->size);
		if (cnt == 0) {
			if (p64) {
				tok64->size = size;
			} else {
				tok32->size = size;
			}
			break;
		}

#ifdef __LP64__
		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
#else
		token_buf = tok32->pgt_buf;
#endif /* __LP64__ */
		tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO);
		if (tokens == NULL) {
			error = ENOMEM;
			break;
		}

		ptr = (void *)tokens;
		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
			struct pfioc_token *t;

			if ((unsigned)cnt < sizeof(*tokens)) {
				break;    /* no more buffer space left */
			}

			t = (struct pfioc_token *)(void *)ptr;
			t->token_value  = entry->token.token_value;
			t->timestamp    = entry->token.timestamp;
			t->pid          = entry->token.pid;
			bcopy(entry->token.proc_name, t->proc_name,
			    PFTOK_PROCNAME_LEN);
			ptr += sizeof(struct pfioc_token);

			cnt -= sizeof(struct pfioc_token);
		}

		if (cnt < ocnt) {
			error = copyout(tokens, token_buf, ocnt - cnt);
		}

		if (p64) {
			tok64->size = ocnt - cnt;
		} else {
			tok32->size = ocnt - cnt;
		}

		_FREE(tokens, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
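/*
 * Note on the DIOCGETSTARTERS contract implemented above, as reconstructed:
 * the caller-supplied size bounds how many struct pfioc_token records are
 * copied out to pgt_buf, and size is rewritten on return to the number of
 * bytes actually produced (ocnt - cnt).  The sketch below is illustrative
 * only; it assumes the user-visible struct pfioc_tokens from <net/pfvar.h>
 * exposes the same size/pgt_buf fields used by the 32/64-bit variants here.
 *
 *	struct pfioc_tokens pgt;
 *
 *	memset(&pgt, 0, sizeof(pgt));
 *	(void) ioctl(dev, DIOCGETSTARTERS, &pgt);   // learn bytes needed
 *	pgt.pgt_buf = malloc(pgt.size);
 *	(void) ioctl(dev, DIOCGETSTARTERS, &pgt);   // fill; size := bytes used
 */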
static void
pf_expire_states_and_src_nodes(struct pf_rule *rule)
{
	struct pf_state         *state;
	struct pf_src_node      *sn;
	int                      killed = 0;

	/* expire the states */
	state = TAILQ_FIRST(&state_list);
	while (state) {
		if (state->rule.ptr == rule) {
			state->timeout = PFTM_PURGE;
		}
		state = TAILQ_NEXT(state, entry_list);
	}
	pf_purge_expired_states(pf_status.states);

	/* expire the src_nodes */
	RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
		if (sn->rule.ptr != rule) {
			continue;
		}
		if (sn->states != 0) {
			RB_FOREACH(state, pf_state_tree_id,
			    &tree_id) {
				if (state->src_node == sn) {
					state->src_node = NULL;
				}
				if (state->nat_src_node == sn) {
					state->nat_src_node = NULL;
				}
			}
			sn->states = 0;
		}
		sn->expire = 1;
		killed++;
	}
	if (killed) {
		pf_purge_expired_src_nodes();
	}
}
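/*
 * Note: the helper above does not free anything directly.  States owned by
 * the dying rule are only marked with PFTM_PURGE and matching source nodes
 * with expire = 1; the actual teardown is done by pf_purge_expired_states()
 * and pf_purge_expired_src_nodes(), the same routines the periodic purge
 * path uses.  Detaching state->src_node / state->nat_src_node first keeps
 * the node reference counts consistent before the nodes are reaped.
 */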
static void
pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
    struct pf_rule *rule)
{
	struct pf_rule          *r;
	int                      nr = 0;

	pf_expire_states_and_src_nodes(rule);

	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
	if (ruleset->rules[rs_num].active.rcount-- == 0) {
		panic("%s: rcount value broken!", __func__);
	}
	r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	while (r) {
		r->nr = nr++;
		r = TAILQ_NEXT(r, entries);
	}
}

static void
pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
{
	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
	ruleset->rules[rs].active.ticket =
	    ++ruleset->rules[rs].inactive.ticket;
}
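/*
 * Bumping the active ticket to one past the inactive ticket, as
 * pf_ruleset_cleanup() does above, invalidates any ticket a user-space
 * caller obtained earlier (e.g. via DIOCGETRULES), so stale iterations
 * over a ruleset that has just been edited are rejected instead of
 * returning inconsistent rule numbers.
 */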
/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM.
 */
static int
pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule = NULL;
	int                      is_anchor;
	int                      error;
	int                      i;

	is_anchor = (pr->anchor_call[0] != '\0');
	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
	    pr->rule.owner, is_anchor, &error)) == NULL) {
		return error;
	}

	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule && (rule->ticket != pr->rule.ticket)) {
			rule = TAILQ_NEXT(rule, entries);
		}
	}
	if (rule == NULL) {
		return ENOENT;
	} else {
		i--;
	}

	if (strcmp(rule->owner, pr->rule.owner)) {
		return EACCES;
	}

delete_rule:
	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
		/* set rule & ruleset to parent and repeat */
		struct pf_rule *delete_rule = rule;
		struct pf_ruleset *delete_ruleset = ruleset;

#define parent_ruleset          ruleset->anchor->parent->ruleset
		if (ruleset->anchor->parent == NULL) {
			ruleset = &pf_main_ruleset;
		} else {
			ruleset = &parent_ruleset;
		}

		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
		while (rule &&
		    (rule->anchor != delete_ruleset->anchor)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			panic("%s: rule not found!", __func__);
		}

		/*
		 * if reqest device != rule's device, bail :
		 * with error if ticket matches;
		 * without error if ticket doesn't match (i.e. its just cleanup)
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			if (rule->ticket != pr->rule.ticket) {
				return 0;
			} else {
				return EACCES;
			}
		}

		if (delete_rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}

		pf_delete_rule_from_ruleset(delete_ruleset,
		    i, delete_rule);
		delete_ruleset->rules[i].active.ticket =
		    ++delete_ruleset->rules[i].inactive.ticket;
		goto delete_rule;
	} else {
		/*
		 * process deleting rule only if device that added the
		 * rule matches device that issued the request
		 */
		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
			return EACCES;
		}
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules--;
		}
		pf_delete_rule_from_ruleset(ruleset, i,
		    rule);
		pf_ruleset_cleanup(ruleset, i);
	}

	return 0;
}
/*
 * req_dev encodes the PF interface. Currently, possible values are
 * 0 or PFRULE_PFM.
 */
static void
pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
{
	struct pf_ruleset       *ruleset;
	struct pf_rule          *rule, *next;
	int                      deleted = 0;

	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
		ruleset = &pf_main_ruleset;
		while (rule) {
			next = TAILQ_NEXT(rule, entries);
			/*
			 * process deleting rule only if device that added the
			 * rule matches device that issued the request
			 */
			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
				rule = next;
				continue;
			}
			if (rule->anchor) {
				if (((strcmp(rule->owner, owner)) == 0) ||
				    ((strcmp(rule->owner, "")) == 0)) {
					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
						if (deleted) {
							pf_ruleset_cleanup(ruleset, rs);
							deleted = 0;
						}
						/* step into anchor */
						ruleset =
						    &rule->anchor->ruleset;
						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
						continue;
					} else {
						if (rule->rule_flag &
						    PFRULE_PFM) {
							pffwrules--;
						}
						pf_delete_rule_from_ruleset(ruleset, rs, rule);
						deleted = 1;
						rule = next;
					}
				} else {
					rule = next;
				}
			} else {
				if (((strcmp(rule->owner, owner)) == 0)) {
					if (rule->rule_flag & PFRULE_PFM) {
						pffwrules--;
					}
					pf_delete_rule_from_ruleset(ruleset,
					    rs, rule);
					deleted = 1;
				}
				rule = next;
			}
			if (rule == NULL) {
				if (deleted) {
					pf_ruleset_cleanup(ruleset, rs);
					deleted = 0;
				}
				if (ruleset != &pf_main_ruleset) {
					pf_deleterule_anchor_step_out(&ruleset,
					    rs, &rule);
				}
			}
		}
	}
}
static void
pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
    int rs, struct pf_rule **rule_ptr)
{
	struct pf_ruleset *ruleset = *ruleset_ptr;
	struct pf_rule *rule = *rule_ptr;

	/* step out of anchor */
	struct pf_ruleset *rs_copy = ruleset;
	ruleset = ruleset->anchor->parent ?
	    &ruleset->anchor->parent->ruleset : &pf_main_ruleset;

	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
	while (rule && (rule->anchor != rs_copy->anchor)) {
		rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL) {
		panic("%s: parent rule of anchor not found!", __func__);
	}
	if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
		rule = TAILQ_NEXT(rule, entries);
	}

	*ruleset_ptr = ruleset;
	*rule_ptr = rule;
}
static void
pf_addrwrap_setup(struct pf_addr_wrap *aw)
{
	bzero(&aw->p, sizeof aw->p);
}
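/*
 * pf_rule_setup() below finishes the kernel side of a rule that has just
 * been copied in from user space: it resolves the interface, tags, dynamic
 * addresses, tables and anchor the rule names, moves the staged address
 * pool (pf_pabuf) into the rule, and rejects translation rules that would
 * be left without a pool.  On failure the partially built rule is torn
 * down with pf_rm_rule() before the error is returned.
 */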
static int
pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
    struct pf_ruleset *ruleset)
{
	struct pf_pooladdr      *apa;
	int                      error = 0;

	if (rule->ifname[0]) {
		rule->kif = pfi_kif_get(rule->ifname);
		if (rule->kif == NULL) {
			pool_put(&pf_rule_pl, rule);
			return EINVAL;
		}
		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
	}
	if (rule->tagname[0]) {
		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->match_tagname[0]) {
		if ((rule->match_tag =
		    pf_tagname2tag(rule->match_tagname)) == 0) {
			error = EBUSY;
		}
	}
	if (rule->rt && !rule->direction) {
		error = EINVAL;
	}
	if (!rule->log) {
		rule->logif = 0;
	}
	if (rule->logif >= PFLOGIFS_MAX) {
		error = EINVAL;
	}
	pf_addrwrap_setup(&rule->src.addr);
	pf_addrwrap_setup(&rule->dst.addr);
	if (pf_rtlabel_add(&rule->src.addr) ||
	    pf_rtlabel_add(&rule->dst.addr)) {
		error = EBUSY;
	}
	if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) {
		error = EINVAL;
	}
	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->src.addr)) {
		error = EINVAL;
	}
	if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) {
		error = EINVAL;
	}
	if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) {
		error = EINVAL;
	}
	TAILQ_FOREACH(apa, &pf_pabuf, entries)
	if (pf_tbladdr_setup(ruleset, &apa->addr)) {
		error = EINVAL;
	}

	if (rule->overload_tblname[0]) {
		if ((rule->overload_tbl = pfr_attach_table(ruleset,
		    rule->overload_tblname)) == NULL) {
			error = EINVAL;
		} else {
			rule->overload_tbl->pfrkt_flags |=
			    PFR_TFLAG_ACTIVE;
		}
	}

	pf_mv_pool(&pf_pabuf, &rule->rpool.list);

	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
	    (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) &&
	    rule->anchor == NULL) ||
	    (rule->rt > PF_FASTROUTE)) &&
	    (TAILQ_FIRST(&rule->rpool.list) == NULL)) {
		error = EINVAL;
	}

	if (error) {
		pf_rm_rule(NULL, rule);
		return error;
	}
	/* For a NAT64 rule the rule's address family is AF_INET6 whereas
	 * the address pool's family will be AF_INET
	 */
	rule->rpool.af = (rule->action == PF_NAT64) ? AF_INET : rule->af;
	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
	rule->evaluations = rule->packets[0] = rule->packets[1] =
	    rule->bytes[0] = rule->bytes[1] = 0;

	return 0;
}
static int
pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
{
	int error = 0;
	u_int32_t req_dev = 0;

	switch (cmd) {
	case DIOCADDRULE: {
		struct pf_ruleset       *ruleset;
		struct pf_rule          *rule, *tail;
		int                      rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
			error = EBUSY;
			break;
		}
		if (pr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}
		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
		    pf_rulequeue);
		if (tail) {
			rule->nr = tail->nr + 1;
		} else {
			rule->nr = 0;
		}

		if ((error = pf_rule_setup(pr, rule, ruleset))) {
			break;
		}

		TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
		    rule, entries);
		ruleset->rules[rs_num].inactive.rcount++;
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules++;
		}

		if (rule->action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, 1);
		}

		if (pr->anchor_call[0] == '\0') {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
			if (rule->rule_flag & PFRULE_PFM) {
				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
			}
		}

#if DUMMYNET
		if (rule->action == PF_DUMMYNET) {
			struct dummynet_event dn_event;
			uint32_t direction = DN_INOUT;

			bzero(&dn_event, sizeof(dn_event));

			dn_event.dn_event_code = DUMMYNET_RULE_CONFIG;

			if (rule->direction == PF_IN) {
				direction = DN_IN;
			} else if (rule->direction == PF_OUT) {
				direction = DN_OUT;
			}

			dn_event.dn_event_rule_config.dir = direction;
			dn_event.dn_event_rule_config.af = rule->af;
			dn_event.dn_event_rule_config.proto = rule->proto;
			dn_event.dn_event_rule_config.src_port = rule->src.xport.range.port[0];
			dn_event.dn_event_rule_config.dst_port = rule->dst.xport.range.port[0];
			strlcpy(dn_event.dn_event_rule_config.ifname, rule->ifname,
			    sizeof(dn_event.dn_event_rule_config.ifname));

			dummynet_event_enqueue_nwk_wq_entry(&dn_event);
		}
#endif /* DUMMYNET */
		break;
	}

	case DIOCGETRULES: {
		struct pf_ruleset       *ruleset;
		struct pf_rule          *tail;
		int                      rs_num;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
		    pf_rulequeue);
		if (tail) {
			pr->nr = tail->nr + 1;
		} else {
			pr->nr = 0;
		}
		pr->ticket = ruleset->rules[rs_num].active.ticket;
		break;
	}

	case DIOCGETRULE: {
		struct pf_ruleset       *ruleset;
		struct pf_rule          *rule;
		int                      rs_num, i;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
			error = EBUSY;
			break;
		}
		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((rule != NULL) && (rule->nr != pr->nr)) {
			rule = TAILQ_NEXT(rule, entries);
		}
		if (rule == NULL) {
			error = EBUSY;
			break;
		}
		pf_rule_copyout(rule, &pr->rule);
		if (pf_anchor_copyout(ruleset, rule, pr)) {
			error = EBUSY;
			break;
		}
		pfi_dynaddr_copyout(&pr->rule.src.addr);
		pfi_dynaddr_copyout(&pr->rule.dst.addr);
		pf_tbladdr_copyout(&pr->rule.src.addr);
		pf_tbladdr_copyout(&pr->rule.dst.addr);
		pf_rtlabel_copyout(&pr->rule.src.addr);
		pf_rtlabel_copyout(&pr->rule.dst.addr);
		for (i = 0; i < PF_SKIP_COUNT; ++i) {
			if (rule->skip[i].ptr == NULL) {
				pr->rule.skip[i].nr = -1;
			} else {
				pr->rule.skip[i].nr =
				    rule->skip[i].ptr->nr;
			}
		}

		if (pr->action == PF_GET_CLR_CNTR) {
			rule->evaluations = 0;
			rule->packets[0] = rule->packets[1] = 0;
			rule->bytes[0] = rule->bytes[1] = 0;
		}
		break;
	}

	case DIOCCHANGERULE: {
		struct pfioc_rule       *pcr = pr;
		struct pf_ruleset       *ruleset;
		struct pf_rule          *oldrule = NULL, *newrule = NULL;
		struct pf_pooladdr      *pa;
		u_int32_t                nr = 0;
		int                      rs_num;

		if (!(pcr->action == PF_CHANGE_REMOVE ||
		    pcr->action == PF_CHANGE_GET_TICKET) &&
		    pcr->pool_ticket != ticket_pabuf) {
			error = EBUSY;
			break;
		}

		if (pcr->action < PF_CHANGE_ADD_HEAD ||
		    pcr->action > PF_CHANGE_GET_TICKET) {
			error = EINVAL;
			break;
		}
		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
		pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0';
		ruleset = pf_find_ruleset(pcr->anchor);
		if (ruleset == NULL) {
			error = EINVAL;
			break;
		}
		rs_num = pf_get_ruleset_number(pcr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}

		if (pcr->action == PF_CHANGE_GET_TICKET) {
			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
			break;
		} else {
			if (pcr->ticket !=
			    ruleset->rules[rs_num].active.ticket) {
				error = EINVAL;
				break;
			}
			if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
				error = EINVAL;
				break;
			}
		}

		if (pcr->action != PF_CHANGE_REMOVE) {
			newrule = pool_get(&pf_rule_pl, PR_WAITOK);
			if (newrule == NULL) {
				error = ENOMEM;
				break;
			}
			pf_rule_copyin(&pcr->rule, newrule, p, minordev);
#if !INET
			if (newrule->af == AF_INET) {
				pool_put(&pf_rule_pl, newrule);
				error = EAFNOSUPPORT;
				break;
			}
#endif /* INET */
			if (newrule->ifname[0]) {
				newrule->kif = pfi_kif_get(newrule->ifname);
				if (newrule->kif == NULL) {
					pool_put(&pf_rule_pl, newrule);
					error = EINVAL;
					break;
				}
				pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE);
			} else {
				newrule->kif = NULL;
			}

			if (newrule->tagname[0]) {
				if ((newrule->tag =
				    pf_tagname2tag(newrule->tagname)) == 0) {
					error = EBUSY;
				}
			}
			if (newrule->match_tagname[0]) {
				if ((newrule->match_tag = pf_tagname2tag(
					    newrule->match_tagname)) == 0) {
					error = EBUSY;
				}
			}
			if (newrule->rt && !newrule->direction) {
				error = EINVAL;
			}
			if (!newrule->log) {
				newrule->logif = 0;
			}
			if (newrule->logif >= PFLOGIFS_MAX) {
				error = EINVAL;
			}
			pf_addrwrap_setup(&newrule->src.addr);
			pf_addrwrap_setup(&newrule->dst.addr);
			if (pf_rtlabel_add(&newrule->src.addr) ||
			    pf_rtlabel_add(&newrule->dst.addr)) {
				error = EBUSY;
			}
			if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) {
				error = EINVAL;
			}
			if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) {
				error = EINVAL;
			}
			if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) {
				error = EINVAL;
			}
			if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) {
				error = EINVAL;
			}
			if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) {
				error = EINVAL;
			}
			TAILQ_FOREACH(pa, &pf_pabuf, entries)
			if (pf_tbladdr_setup(ruleset, &pa->addr)) {
				error = EINVAL;
			}

			if (newrule->overload_tblname[0]) {
				if ((newrule->overload_tbl = pfr_attach_table(
					    ruleset, newrule->overload_tblname)) ==
				    NULL) {
					error = EINVAL;
				} else {
					newrule->overload_tbl->pfrkt_flags |=
					    PFR_TFLAG_ACTIVE;
				}
			}

			pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
			if (((((newrule->action == PF_NAT) ||
			    (newrule->action == PF_RDR) ||
			    (newrule->action == PF_BINAT) ||
			    (newrule->rt > PF_FASTROUTE)) &&
			    !newrule->anchor)) &&
			    (TAILQ_FIRST(&newrule->rpool.list) == NULL)) {
				error = EINVAL;
			}

			if (error) {
				pf_rm_rule(NULL, newrule);
				break;
			}
			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
			newrule->evaluations = 0;
			newrule->packets[0] = newrule->packets[1] = 0;
			newrule->bytes[0] = newrule->bytes[1] = 0;
		}
		pf_empty_pool(&pf_pabuf);

		if (pcr->action == PF_CHANGE_ADD_HEAD) {
			oldrule = TAILQ_FIRST(
				ruleset->rules[rs_num].active.ptr);
		} else if (pcr->action == PF_CHANGE_ADD_TAIL) {
			oldrule = TAILQ_LAST(
				ruleset->rules[rs_num].active.ptr, pf_rulequeue);
		} else {
			oldrule = TAILQ_FIRST(
				ruleset->rules[rs_num].active.ptr);
			while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) {
				oldrule = TAILQ_NEXT(oldrule, entries);
			}
			if (oldrule == NULL) {
				if (newrule != NULL) {
					pf_rm_rule(NULL, newrule);
				}
				error = EINVAL;
				break;
			}
		}

		if (pcr->action == PF_CHANGE_REMOVE) {
			pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
			ruleset->rules[rs_num].active.rcount--;
		} else {
			if (oldrule == NULL) {
				TAILQ_INSERT_TAIL(
					ruleset->rules[rs_num].active.ptr,
					newrule, entries);
			} else if (pcr->action == PF_CHANGE_ADD_HEAD ||
			    pcr->action == PF_CHANGE_ADD_BEFORE) {
				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
			} else {
				TAILQ_INSERT_AFTER(
					ruleset->rules[rs_num].active.ptr,
					oldrule, newrule, entries);
			}
			ruleset->rules[rs_num].active.rcount++;
		}

		nr = 0;
		TAILQ_FOREACH(oldrule,
		    ruleset->rules[rs_num].active.ptr, entries)
		oldrule->nr = nr++;

		ruleset->rules[rs_num].active.ticket++;

		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
		pf_remove_if_empty_ruleset(ruleset);

		break;
	}

	case DIOCINSERTRULE: {
		struct pf_ruleset       *ruleset;
		struct pf_rule          *rule, *tail, *r;
		int                      rs_num;
		int                      is_anchor;

		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';
		is_anchor = (pr->anchor_call[0] != '\0');

		if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
		    pr->rule.owner, is_anchor, &error)) == NULL) {
			break;
		}

		rs_num = pf_get_ruleset_number(pr->rule.action);
		if (rs_num >= PF_RULESET_MAX) {
			error = EINVAL;
			break;
		}
		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* make sure this anchor rule doesn't exist already */
		if (is_anchor) {
			r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
			while (r) {
				if (r->anchor &&
				    ((strcmp(r->anchor->name,
				    pr->anchor_call)) == 0)) {
					if (((strcmp(pr->rule.owner,
					    r->owner)) == 0) ||
					    ((strcmp(r->owner, "")) == 0)) {
						error = EEXIST;
						break;
					}
				}

				r = TAILQ_NEXT(r, entries);
			}
			if (error == EEXIST) {
				break;
			}
		}

		rule = pool_get(&pf_rule_pl, PR_WAITOK);
		if (rule == NULL) {
			error = ENOMEM;
			break;
		}
		pf_rule_copyin(&pr->rule, rule, p, minordev);
#if !INET
		if (rule->af == AF_INET) {
			pool_put(&pf_rule_pl, rule);
			error = EAFNOSUPPORT;
			break;
		}
#endif /* INET */
		r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
		while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) {
			r = TAILQ_NEXT(r, entries);
		}
		if (r == NULL) {
			if ((tail =
			    TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue)) != NULL) {
				rule->nr = tail->nr + 1;
			} else {
				rule->nr = 0;
			}
		} else {
			rule->nr = r->nr;
		}

		if ((error = pf_rule_setup(pr, rule, ruleset))) {
			break;
		}

		if (rule->anchor != NULL) {
			strlcpy(rule->anchor->owner, rule->owner,
			    PF_OWNER_NAME_SIZE);
		}

		if (r) {
			TAILQ_INSERT_BEFORE(r, rule, entries);
			while (r && ++r->nr) {
				r = TAILQ_NEXT(r, entries);
			}
		} else {
			TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr,
			    rule, entries);
		}
		ruleset->rules[rs_num].active.rcount++;

		/* Calculate checksum for the main ruleset */
		if (ruleset == &pf_main_ruleset) {
			error = pf_setup_pfsync_matching(ruleset);
		}

		pf_ruleset_cleanup(ruleset, rs_num);
		rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule);

		pr->rule.ticket = rule->ticket;
		pf_rule_copyout(rule, &pr->rule);
		if (rule->rule_flag & PFRULE_PFM) {
			pffwrules++;
		}
		if (rule->action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, 1);
		}

		if (pr->anchor_call[0] == '\0') {
			INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total);
			if (rule->rule_flag & PFRULE_PFM) {
				INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_os);
			}
		}
		break;
	}

	case DIOCDELETERULE: {
		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
		pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0';

		if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
			error = EINVAL;
			break;
		}

		/* get device through which request is made */
		if ((uint8_t)minordev == PFDEV_PFM) {
			req_dev |= PFRULE_PFM;
		}

		if (pr->rule.ticket) {
			if ((error = pf_delete_rule_by_ticket(pr, req_dev))) {
				break;
			}
		} else {
			pf_delete_rule_by_owner(pr->rule.owner, req_dev);
		}

		if (pr->rule.action == PF_NAT64) {
			atomic_add_16(&pf_nat64_configured, -1);
		}
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
static int
pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0';
	psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0';

	bool ifname_matched = true;
	bool owner_matched = true;

	switch (cmd) {
	case DIOCCLRSTATES: {
		struct pf_state         *s, *nexts;
		int                      killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			/*
			 * Purge all states only when neither ifname
			 * or owner is provided. If any of these are provided
			 * we purge only the states with meta data that match
			 */
			bool unlink_state = false;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			unlink_state = ifname_matched && owner_matched;

			if (unlink_state) {
				/* don't send out individual delete messages */
				s->sync_flags = PFSTATE_NOSYNC;
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = (sa_family_t)killed;
		pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
		break;
	}

	case DIOCKILLSTATES: {
		struct pf_state         *s, *nexts;
		struct pf_state_key     *sk;
		struct pf_state_host    *src, *dst;
		int                      killed = 0;

		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
		    s = nexts) {
			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
			sk = s->state_key;
			ifname_matched = true;
			owner_matched = true;

			if (psk->psk_ifname[0] &&
			    strcmp(psk->psk_ifname, s->kif->pfik_name)) {
				ifname_matched = false;
			}

			if (psk->psk_ownername[0] &&
			    ((NULL == s->rule.ptr) ||
			    strcmp(psk->psk_ownername, s->rule.ptr->owner))) {
				owner_matched = false;
			}

			if (sk->direction == PF_OUT) {
				src = &sk->lan;
				dst = &sk->ext_lan;
			} else {
				src = &sk->ext_lan;
				dst = &sk->lan;
			}
			if ((!psk->psk_af || sk->af_lan == psk->psk_af) &&
			    (!psk->psk_proto || psk->psk_proto == sk->proto) &&
			    PF_MATCHA(psk->psk_src.neg,
			    &psk->psk_src.addr.v.a.addr,
			    &psk->psk_src.addr.v.a.mask,
			    &src->addr, sk->af_lan) &&
			    PF_MATCHA(psk->psk_dst.neg,
			    &psk->psk_dst.addr.v.a.addr,
			    &psk->psk_dst.addr.v.a.mask,
			    &dst->addr, sk->af_lan) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_src.xport,
			    &src->xport)) &&
			    (pf_match_xport(psk->psk_proto,
			    psk->psk_proto_variant, &psk->psk_dst.xport,
			    &dst->xport)) &&
			    ifname_matched &&
			    owner_matched) {
				/* send immediate delete of state */
				pfsync_delete_state(s);
				s->sync_flags |= PFSTATE_NOSYNC;
				pf_unlink_state(s);
				killed++;
			}
		}
		psk->psk_af = (sa_family_t)killed;
		break;
	}
	}

	return error;
}
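/*
 * Illustrative sketch (user space, not compiled here) of the matching that
 * DIOCKILLSTATES performs above: the caller describes the states to kill
 * with address/mask pairs plus optional address family, protocol, interface
 * name and rule owner; psk_af is reused on return to report how many states
 * were removed.  This mirrors what a `pfctl -k host' style tool does.
 *
 *	struct pfioc_state_kill psk;
 *	struct in_addr victim;
 *
 *	memset(&psk, 0, sizeof(psk));
 *	psk.psk_af = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.10", &victim);
 *	memcpy(&psk.psk_src.addr.v.a.addr, &victim, sizeof(victim));
 *	memset(&psk.psk_src.addr.v.a.mask, 0xff, 4);	// match a /32
 *	(void) ioctl(dev, DIOCKILLSTATES, &psk);	// psk.psk_af := killed
 */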
static int
pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p)
{
#pragma unused(p)
	int error = 0;

	switch (cmd) {
	case DIOCADDSTATE: {
		struct pfsync_state     *sp = &ps->state;
		struct pf_state         *s;
		struct pf_state_key     *sk;
		struct pfi_kif          *kif;

		if (sp->timeout >= PFTM_MAX) {
			error = EINVAL;
			break;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(s, sizeof(struct pf_state));
		if ((sk = pf_alloc_state_key(s, NULL)) == NULL) {
			pool_put(&pf_state_pl, s);
			error = ENOMEM;
			break;
		}
		pf_state_import(sp, sk, s);
		kif = pfi_kif_get(sp->ifname);
		if (kif == NULL) {
			pool_put(&pf_state_pl, s);
			pool_put(&pf_state_key_pl, sk);
			error = ENOENT;
			break;
		}
		TAILQ_INIT(&s->unlink_hooks);
		s->state_key->app_state = 0;
		if (pf_insert_state(kif, s)) {
			pfi_kif_unref(kif, PFI_KIF_REF_NONE);
			pool_put(&pf_state_pl, s);
			error = EEXIST;
			break;
		}
		pf_default_rule.states++;
		VERIFY(pf_default_rule.states != 0);
		break;
	}

	case DIOCGETSTATE: {
		struct pf_state         *s;
		struct pf_state_cmp      id_key;

		bcopy(ps->state.id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ps->state.creatorid;

		s = pf_find_state_byid(&id_key);
		if (s == NULL) {
			error = ENOENT;
			break;
		}

		pf_state_export(&ps->state, s->state_key, s);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return error;
}
static int
pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32,
    struct pfioc_states_64 *ps64, struct proc *p)
{
	int p64 = proc_is64bit(p);
	int error = 0;

	switch (cmd) {
	case DIOCGETSTATES: {           /* struct pfioc_states */
		struct pf_state         *state;
		struct pfsync_state     *pstore;
		user_addr_t              buf;
		u_int32_t                nr = 0;
		int                      len, size;

		len = (p64 ? ps64->ps_len : ps32->ps_len);
		if (len == 0) {
			size = sizeof(struct pfsync_state) * pf_status.states;
			if (p64) {
				ps64->ps_len = size;
			} else {
				ps32->ps_len = size;
			}
			break;
		}

		pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK | M_ZERO);
		if (pstore == NULL) {
			error = ENOMEM;
			break;
		}
#ifdef __LP64__
		buf = (p64 ? ps64->ps_buf : ps32->ps_buf);
#else
		buf = ps32->ps_buf;
#endif /* __LP64__ */

		state = TAILQ_FIRST(&state_list);
		while (state) {
			if (state->timeout != PFTM_UNLINKED) {
				if ((nr + 1) * sizeof(*pstore) > (unsigned)len) {
					break;
				}

				pf_state_export(pstore,
				    state->state_key, state);
				error = copyout(pstore, buf, sizeof(*pstore));
				if (error) {
					_FREE(pstore, M_TEMP);
					goto fail;
				}
				buf += sizeof(*pstore);
				nr++;
			}
			state = TAILQ_NEXT(state, entry_list);
		}

		size = sizeof(struct pfsync_state) * nr;
		if (p64) {
			ps64->ps_len = size;
		} else {
			ps32->ps_len = size;
		}

		_FREE(pstore, M_TEMP);
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

fail:
	return error;
}
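/*
 * Illustrative sketch (user space, not compiled here): DIOCGETSTATES above
 * follows the usual two-step contract.  With ps_len == 0 it only reports
 * the buffer size needed for all states; with a non-zero ps_len it exports
 * as many struct pfsync_state records as fit and rewrites ps_len to the
 * number of bytes actually produced.
 *
 *	struct pfioc_states ps;
 *
 *	memset(&ps, 0, sizeof(ps));
 *	(void) ioctl(dev, DIOCGETSTATES, &ps);          // ps.ps_len := needed
 *	ps.ps_buf = malloc(ps.ps_len);
 *	(void) ioctl(dev, DIOCGETSTATES, &ps);          // fills ps_buf
 *	n = ps.ps_len / sizeof(struct pfsync_state);    // records returned
 */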
3624 pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p)
3631                 struct pf_state_key     *sk;
3632                 struct pf_state         *state;
3633                 struct pf_state_key_cmp  key;
3634                 int                      m = 0, direction = pnl->direction;
3636                 key.proto = pnl->proto;
3637                 key.proto_variant = pnl->proto_variant;
3640                     PF_AZERO(&pnl->saddr, pnl->af) ||
3641                     PF_AZERO(&pnl->daddr, pnl->af) ||
3642                     ((pnl->proto == IPPROTO_TCP ||
3643                     pnl->proto == IPPROTO_UDP) &&
3644                     (!pnl->dxport.port || !pnl->sxport.port))) {
3648                          * userland gives us source and dest of connection,
3649                          * reverse the lookup so we ask for what happens with
3650                          * the return traffic, enabling us to find it in the
3651                          * state tree.
3653                         if (direction == PF_IN) {
3654                                 key.af_gwy = pnl->af;
3655                                 PF_ACPY(&key.ext_gwy.addr, &pnl->daddr,
3657                                 memcpy(&key.ext_gwy.xport, &pnl->dxport,
3658                                     sizeof(key.ext_gwy.xport));
3659                                 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
3660                                 memcpy(&key.gwy.xport, &pnl->sxport,
3661                                     sizeof(key.gwy.xport));
3662                                 state = pf_find_state_all(&key, PF_IN, &m);
3664                                 key.af_lan = pnl->af;
3665                                 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
3666                                 memcpy(&key.lan.xport, &pnl->dxport,
3667                                     sizeof(key.lan.xport));
3668                                 PF_ACPY(&key.ext_lan.addr, &pnl->saddr,
3670                                 memcpy(&key.ext_lan.xport, &pnl->sxport,
3671                                     sizeof(key.ext_lan.xport));
3672                                 state = pf_find_state_all(&key, PF_OUT, &m);
3675                                 error = E2BIG;  /* more than one state */
3676                         } else if (state != NULL) {
3677                                 sk = state->state_key;
3678                                 if (direction == PF_IN) {
3679                                         PF_ACPY(&pnl->rsaddr, &sk->lan.addr,
3681                                         memcpy(&pnl->rsxport, &sk->lan.xport,
3682                                             sizeof(pnl->rsxport));
3683                                         PF_ACPY(&pnl->rdaddr, &pnl->daddr,
3685                                         memcpy(&pnl->rdxport, &pnl->dxport,
3686                                             sizeof(pnl->rdxport));
3688                                         PF_ACPY(&pnl->rdaddr, &sk->gwy.addr,
3690                                         memcpy(&pnl->rdxport, &sk->gwy.xport,
3691                                             sizeof(pnl->rdxport));
3692                                         PF_ACPY(&pnl->rsaddr, &pnl->saddr,
3694                                         memcpy(&pnl->rsxport, &pnl->sxport,
3695                                             sizeof(pnl->rsxport));
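/*
 * Illustrative userland sketch of how a transparent proxy might query the
 * DIOCNATLOOK handler above; it is not part of pf_ioctl.c.  It assumes the
 * userland struct pfioc_natlook from <net/pfvar.h> with the saddr/daddr,
 * sxport/dxport and rdaddr/rdxport fields used above, and an open /dev/pf
 * descriptor.  Addresses are copied with memcpy so the sketch does not depend
 * on the pf_addr union member names.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <string.h>

/* Recover the pre-rdr destination of an accepted, redirected TCP connection. */
static int
natlook_original_dst(int dev, const struct sockaddr_in *client,
    const struct sockaddr_in *local, struct sockaddr_in *orig_dst)
{
	struct pfioc_natlook pnl;

	memset(&pnl, 0, sizeof(pnl));
	pnl.af = AF_INET;
	pnl.proto = IPPROTO_TCP;
	pnl.direction = PF_OUT;			/* reverse lookup, as the comment above describes */
	memcpy(&pnl.saddr, &client->sin_addr, sizeof(client->sin_addr));
	memcpy(&pnl.daddr, &local->sin_addr, sizeof(local->sin_addr));
	pnl.sxport.port = client->sin_port;
	pnl.dxport.port = local->sin_port;

	if (ioctl(dev, DIOCNATLOOK, &pnl) == -1)
		return (-1);

	memset(orig_dst, 0, sizeof(*orig_dst));
	orig_dst->sin_family = AF_INET;
	orig_dst->sin_len = sizeof(*orig_dst);
	memcpy(&orig_dst->sin_addr, &pnl.rdaddr, sizeof(orig_dst->sin_addr));
	orig_dst->sin_port = pnl.rdxport.port;
	return (0);
}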
3713 pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p)
3719         case DIOCSETTIMEOUT: {
3722                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3727                 old = pf_default_rule.timeout[pt->timeout];
3728                 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) {
3731                 pf_default_rule.timeout[pt->timeout] = pt->seconds;
3732                 if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) {
3733                         wakeup(pf_purge_thread_fn);
3739         case DIOCGETTIMEOUT: {
3740                 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3744                 pt->seconds = pf_default_rule.timeout[pt->timeout];
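/*
 * Illustrative userland sketch of the timeout get/set pair handled above; it
 * is not part of pf_ioctl.c.  It assumes struct pfioc_tm and the PFTM_*
 * indices from <net/pfvar.h> and an open /dev/pf descriptor.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <string.h>

/* Halve the purge interval (never below one second). */
static int
tighten_purge_interval(int dev)
{
	struct pfioc_tm pt;

	memset(&pt, 0, sizeof(pt));
	pt.timeout = PFTM_INTERVAL;
	if (ioctl(dev, DIOCGETTIMEOUT, &pt) == -1)
		return (-1);
	pt.seconds = pt.seconds > 1 ? pt.seconds / 2 : 1;
	/* A smaller PFTM_INTERVAL wakes the purge thread, as the code above shows. */
	return (ioctl(dev, DIOCSETTIMEOUT, &pt));
}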
3757 pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p)
3763         case DIOCGETLIMIT: {
3764                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3768                 pl->limit = pf_pool_limits[pl->index].limit;
3772         case DIOCSETLIMIT: {
3775                 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3776                     pf_pool_limits[pl->index].pp == NULL) {
3780                 pool_sethardlimit(pf_pool_limits[pl->index].pp,
3781                     pl->limit, NULL, 0);
3782                 old_limit = pf_pool_limits[pl->index].limit;
3783                 pf_pool_limits[pl->index].limit = pl->limit;
3784                 pl->limit = old_limit;
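/*
 * Illustrative userland sketch of the limit ioctls handled above; it is not
 * part of pf_ioctl.c.  It assumes struct pfioc_limit and the PF_LIMIT_*
 * indices from <net/pfvar.h>; DIOCSETLIMIT echoes the previous limit back in
 * the limit field, exactly as the code above does.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <string.h>

/* Raise the hard limit on state entries; returns the previous limit or -1. */
static int
set_state_limit(int dev, unsigned int new_limit)
{
	struct pfioc_limit pl;

	memset(&pl, 0, sizeof(pl));
	pl.index = PF_LIMIT_STATES;
	pl.limit = new_limit;
	if (ioctl(dev, DIOCSETLIMIT, &pl) == -1)
		return (-1);
	return ((int)pl.limit);			/* old limit, handed back by the kernel */
}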
3797 pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p)
3800         struct pf_pooladdr *pa = NULL;
3801         struct pf_pool *pool = NULL;
3805         case DIOCBEGINADDRS: {
3806                 pf_empty_pool(&pf_pabuf);
3807                 pp->ticket = ++ticket_pabuf;
3812                 pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3813                 if (pp->ticket != ticket_pabuf) {
3818                 if (pp->af == AF_INET) {
3819                         error = EAFNOSUPPORT;
3823                 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3824                     pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3825                     pp->addr.addr.type != PF_ADDR_TABLE) {
3829                 pa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3834                 pf_pooladdr_copyin(&pp->addr, pa);
3835                 if (pa->ifname[0]) {
3836                         pa->kif = pfi_kif_get(pa->ifname);
3837                         if (pa->kif == NULL) {
3838                                 pool_put(&pf_pooladdr_pl, pa);
3842                         pfi_kif_ref(pa->kif, PFI_KIF_REF_RULE);
3844                 pf_addrwrap_setup(&pa->addr);
3845                 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
3846                         pfi_dynaddr_remove(&pa->addr);
3847                         pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE);
3848                         pool_put(&pf_pooladdr_pl, pa);
3852                 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
3856         case DIOCGETADDRS: {
3858                 pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3859                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3860                     pp->r_num, 0, 1, 0);
3865                 TAILQ_FOREACH(pa, &pool->list, entries)
3873                 pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3874                 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
3875                     pp->r_num, 0, 1, 1);
3880                 pa = TAILQ_FIRST(&pool->list);
3881                 while ((pa != NULL) && (nr < pp->nr)) {
3882                         pa = TAILQ_NEXT(pa, entries);
3889                 pf_pooladdr_copyout(pa, &pp->addr);
3890                 pfi_dynaddr_copyout(&pp->addr.addr);
3891                 pf_tbladdr_copyout(&pp->addr.addr);
3892                 pf_rtlabel_copyout(&pp->addr.addr);
3896         case DIOCCHANGEADDR: {
3897                 struct pfioc_pooladdr   *pca = pp;
3898                 struct pf_pooladdr      *oldpa = NULL, *newpa = NULL;
3899                 struct pf_ruleset       *ruleset;
3901                 if (pca->action < PF_CHANGE_ADD_HEAD ||
3902                     pca->action > PF_CHANGE_REMOVE) {
3906                 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
3907                     pca->addr.addr.type != PF_ADDR_DYNIFTL &&
3908                     pca->addr.addr.type != PF_ADDR_TABLE) {
3913                 pca->anchor[sizeof(pca->anchor) - 1] = '\0';
3914                 ruleset = pf_find_ruleset(pca->anchor);
3915                 if (ruleset == NULL) {
3919                 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
3920                     pca->r_num, pca->r_last, 1, 1);
3925                 if (pca->action != PF_CHANGE_REMOVE) {
3926                         newpa = pool_get(&pf_pooladdr_pl, PR_WAITOK);
3927                         if (newpa == NULL) {
3931                         pf_pooladdr_copyin(&pca->addr, newpa);
3933                         if (pca->af == AF_INET) {
3934                                 pool_put(&pf_pooladdr_pl, newpa);
3935                                 error = EAFNOSUPPORT;
3939                         if (newpa->ifname[0]) {
3940                                 newpa->kif = pfi_kif_get(newpa->ifname);
3941                                 if (newpa->kif == NULL) {
3942                                         pool_put(&pf_pooladdr_pl, newpa);
3946                                 pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE);
3950                         pf_addrwrap_setup(&newpa->addr);
3951                         if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
3952                             pf_tbladdr_setup(ruleset, &newpa->addr)) {
3953                                 pfi_dynaddr_remove(&newpa->addr);
3954                                 pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE);
3955                                 pool_put(&pf_pooladdr_pl, newpa);
3961                 if (pca->action == PF_CHANGE_ADD_HEAD) {
3962                         oldpa = TAILQ_FIRST(&pool->list);
3963                 } else if (pca->action == PF_CHANGE_ADD_TAIL) {
3964                         oldpa = TAILQ_LAST(&pool->list, pf_palist);
3968                         oldpa = TAILQ_FIRST(&pool->list);
3969                         while ((oldpa != NULL) && (i < (int)pca->nr)) {
3970                                 oldpa = TAILQ_NEXT(oldpa, entries);
3973                         if (oldpa == NULL) {
3979                 if (pca->action == PF_CHANGE_REMOVE) {
3980                         TAILQ_REMOVE(&pool->list, oldpa, entries);
3981                         pfi_dynaddr_remove(&oldpa->addr);
3982                         pf_tbladdr_remove(&oldpa->addr);
3983                         pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE);
3984                         pool_put(&pf_pooladdr_pl, oldpa);
3986                         if (oldpa == NULL) {
3987                                 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
3988                         } else if (pca->action == PF_CHANGE_ADD_HEAD ||
3989                             pca->action == PF_CHANGE_ADD_BEFORE) {
3990                                 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
3992                                 TAILQ_INSERT_AFTER(&pool->list, oldpa,
3997                 pool->cur = TAILQ_FIRST(&pool->list);
3998                 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
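/*
 * Illustrative userland sketch of the DIOCBEGINADDRS/DIOCADDADDR ticket
 * handshake that fills pf_pabuf above; it is not part of pf_ioctl.c.  It
 * assumes struct pfioc_pooladdr from <net/pfvar.h>, an open /dev/pf
 * descriptor, and an address family accepted by the build (the elided
 * preprocessor guards around the EAFNOSUPPORT checks above decide that).
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <string.h>

/* Begin a pool-address transaction and queue one /32 host address. */
static int
queue_pool_address(int dev, const struct in_addr *host, u_int32_t *ticket)
{
	struct pfioc_pooladdr pp;

	memset(&pp, 0, sizeof(pp));
	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)	/* empties pf_pabuf, returns a ticket */
		return (-1);
	*ticket = pp.ticket;

	pp.af = AF_INET;
	pp.addr.addr.type = PF_ADDR_ADDRMASK;
	memcpy(&pp.addr.addr.v.a.addr, host, sizeof(*host));
	memset(&pp.addr.addr.v.a.mask, 0xff, sizeof(struct in_addr));	/* /32 mask */
	return (ioctl(dev, DIOCADDADDR, &pp));		/* must carry the matching ticket */
}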
4012 pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p)
4018         case DIOCGETRULESETS: {
4019                 struct pf_ruleset       *ruleset;
4020                 struct pf_anchor        *anchor;
4022                 pr->path[sizeof(pr->path) - 1] = '\0';
4023                 pr->name[sizeof(pr->name) - 1] = '\0';
4024                 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4029                 if (ruleset->anchor == NULL) {
4030                         /* XXX kludge for pf_main_ruleset */
4031                         RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4032                         if (anchor->parent == NULL) {
4036                         RB_FOREACH(anchor, pf_anchor_node,
4037                             &ruleset->anchor->children)
4043         case DIOCGETRULESET: {
4044                 struct pf_ruleset       *ruleset;
4045                 struct pf_anchor        *anchor;
4048                 pr->path[sizeof(pr->path) - 1] = '\0';
4049                 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
4054                 if (ruleset->anchor == NULL) {
4055                         /* XXX kludge for pf_main_ruleset */
4056                         RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
4057                         if (anchor->parent == NULL && nr++ == pr->nr) {
4058                                 strlcpy(pr->name, anchor->name,
4063                         RB_FOREACH(anchor, pf_anchor_node,
4064                             &ruleset->anchor->children)
4065                         if (nr++ == pr->nr) {
4066                                 strlcpy(pr->name, anchor->name,
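/*
 * Illustrative userland sketch of walking anchors with the DIOCGETRULESETS /
 * DIOCGETRULESET pair handled above; it is not part of pf_ioctl.c.  It
 * assumes struct pfioc_ruleset (path, name, nr) from <net/pfvar.h> and an
 * open /dev/pf descriptor.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <stdio.h>
#include <string.h>

/* Print the child anchors of "path" (an empty path means the main ruleset). */
static int
list_anchors(int dev, const char *path)
{
	struct pfioc_ruleset pr;
	u_int32_t i, count;

	memset(&pr, 0, sizeof(pr));
	strlcpy(pr.path, path, sizeof(pr.path));
	if (ioctl(dev, DIOCGETRULESETS, &pr) == -1)	/* counts the children */
		return (-1);
	count = pr.nr;
	for (i = 0; i < count; i++) {
		pr.nr = i;				/* fetch the i-th child's name */
		if (ioctl(dev, DIOCGETRULESET, &pr) == -1)
			return (-1);
		printf("%s/%s\n", path, pr.name);
	}
	return (0);
}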
4086 pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32,
4087     struct pfioc_trans_64 *io64, struct proc *p)
4089         int error = 0, esize, size;
4093         int p64 = proc_is64bit(p);
4095         esize = (p64 ? io64->esize : io32->esize);
4096         size = (p64 ? io64->size : io32->size);
4097         buf = (p64 ? io64->array : io32->array);
4099 #pragma unused(io64, p)
4100         esize = io32->esize;
4107                 struct pfioc_trans_e    *ioe;
4108                 struct pfr_table        *table;
4111                 if (esize != sizeof(*ioe)) {
4115                 ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
4116                 table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
4117                 for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
4118                         if (copyin(buf, ioe, sizeof(*ioe))) {
4119                                 _FREE(table, M_TEMP);
4124                         ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
4125                         switch (ioe->rs_num) {
4126                         case PF_RULESET_ALTQ:
4128                         case PF_RULESET_TABLE:
4129                                 bzero(table, sizeof(*table));
4130                                 strlcpy(table->pfrt_anchor, ioe->anchor,
4131                                     sizeof(table->pfrt_anchor));
4132                                 if ((error = pfr_ina_begin(table,
4133                                     &ioe->ticket, NULL, 0))) {
4134                                         _FREE(table, M_TEMP);
4140                                 if ((error = pf_begin_rules(&ioe->ticket,
4141                                     ioe->rs_num, ioe->anchor))) {
4142                                         _FREE(table, M_TEMP);
4148                         if (copyout(ioe, buf, sizeof(*ioe))) {
4149                                 _FREE(table, M_TEMP);
4155                 _FREE(table, M_TEMP);
4160         case DIOCXROLLBACK: {
4161                 struct pfioc_trans_e    *ioe;
4162                 struct pfr_table        *table;
4165                 if (esize != sizeof(*ioe)) {
4169                 ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
4170                 table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
4171                 for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
4172                         if (copyin(buf, ioe, sizeof(*ioe))) {
4173                                 _FREE(table, M_TEMP);
4178                         ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
4179                         switch (ioe->rs_num) {
4180                         case PF_RULESET_ALTQ:
4182                         case PF_RULESET_TABLE:
4183                                 bzero(table, sizeof(*table));
4184                                 strlcpy(table->pfrt_anchor, ioe->anchor,
4185                                     sizeof(table->pfrt_anchor));
4186                                 if ((error = pfr_ina_rollback(table,
4187                                     ioe->ticket, NULL, 0))) {
4188                                         _FREE(table, M_TEMP);
4190                                         goto fail; /* really bad */
4194                                 if ((error = pf_rollback_rules(ioe->ticket,
4195                                     ioe->rs_num, ioe->anchor))) {
4196                                         _FREE(table, M_TEMP);
4198                                         goto fail; /* really bad */
4203                 _FREE(table, M_TEMP);
4209                 struct pfioc_trans_e    *ioe;
4210                 struct pfr_table        *table;
4211                 struct pf_ruleset       *rs;
4212                 user_addr_t              _buf = buf;
4215                 if (esize != sizeof(*ioe)) {
4219                 ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK);
4220                 table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK);
4221                 /* first make sure everything will succeed */
4222                 for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
4223                         if (copyin(buf, ioe, sizeof(*ioe))) {
4224                                 _FREE(table, M_TEMP);
4229                         ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
4230                         switch (ioe->rs_num) {
4231                         case PF_RULESET_ALTQ:
4233                         case PF_RULESET_TABLE:
4234                                 rs = pf_find_ruleset(ioe->anchor);
4235                                 if (rs == NULL || !rs->topen || ioe->ticket !=
4237                                         _FREE(table, M_TEMP);
4244                                 if (ioe->rs_num < 0 || ioe->rs_num >=
4246                                         _FREE(table, M_TEMP);
4251                                 rs = pf_find_ruleset(ioe->anchor);
4253                                     !rs->rules[ioe->rs_num].inactive.open ||
4254                                     rs->rules[ioe->rs_num].inactive.ticket !=
4256                                         _FREE(table, M_TEMP);
4265                 /* now do the commit - no errors should happen here */ 
4266                 for (i = 0; i < size; i++, buf += sizeof(*ioe)) {
4267                         if (copyin(buf, ioe, sizeof(*ioe))) {
4268                                 _FREE(table, M_TEMP);
4273                         ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
4274                         switch (ioe->rs_num) {
4275                         case PF_RULESET_ALTQ:
4277                         case PF_RULESET_TABLE:
4278                                 bzero(table, sizeof(*table));
4279                                 strlcpy(table->pfrt_anchor, ioe->anchor,
4280                                     sizeof(table->pfrt_anchor));
4281                                 if ((error = pfr_ina_commit(table, ioe->ticket,
4283                                         _FREE(table, M_TEMP);
4285                                         goto fail; /* really bad */
4289                                 if ((error = pf_commit_rules(ioe->ticket,
4290                                     ioe->rs_num, ioe->anchor))) {
4291                                         _FREE(table, M_TEMP);
4293                                         goto fail; /* really bad */
4298                 _FREE(table, M_TEMP);
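/*
 * Illustrative userland sketch of the DIOCXBEGIN/DIOCXCOMMIT transaction
 * serviced above; it is not part of pf_ioctl.c.  It assumes the userland
 * struct pfioc_trans and pfioc_trans_e from <net/pfvar.h> (size, esize,
 * array; rs_num, anchor, ticket) and an open /dev/pf descriptor.  Rules or
 * table contents would normally be loaded between the two calls using the
 * ticket DIOCXBEGIN returns.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <string.h>

/* Atomically replace the main filter ruleset (left empty here for brevity). */
static int
replace_filter_rules(int dev)
{
	struct pfioc_trans trans;
	struct pfioc_trans_e te;

	memset(&te, 0, sizeof(te));
	te.rs_num = PF_RULESET_FILTER;			/* main anchor: te.anchor stays "" */

	memset(&trans, 0, sizeof(trans));
	trans.size = 1;
	trans.esize = sizeof(te);
	trans.array = &te;

	if (ioctl(dev, DIOCXBEGIN, &trans) == -1)	/* opens an inactive ruleset, fills te.ticket */
		return (-1);
	/* ... DIOCADDRULE calls referencing te.ticket would go here ... */
	if (ioctl(dev, DIOCXCOMMIT, &trans) == -1) {	/* swap inactive -> active */
		(void)ioctl(dev, DIOCXROLLBACK, &trans);
		return (-1);
	}
	return (0);
}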
4312 pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32,
4313     struct pfioc_src_nodes_64 *psn64, struct proc *p)
4315         int p64 = proc_is64bit(p);
4319         case DIOCGETSRCNODES: {
4320                 struct pf_src_node      *n, *pstore;
4325                 space = (p64 ? psn64->psn_len : psn32->psn_len);
4327                         RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
4330                         size = sizeof(struct pf_src_node) * nr;
4332                                 psn64->psn_len = size;
4334                                 psn32->psn_len = size;
4339                 pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK);
4340                 if (pstore == NULL) {
4345                 buf = (p64 ? psn64->psn_buf : psn32->psn_buf);
4347                 buf = psn32->psn_buf;
4350                 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
4351                         uint64_t secs = pf_time_second(), diff;
4353                         if ((nr + 1) * sizeof(*pstore) > (unsigned)space) {
4357                         bcopy(n, pstore, sizeof(*pstore));
4358                         if (n->rule.ptr != NULL) {
4359                                 pstore->rule.nr = n->rule.ptr->nr;
4361                         pstore->creation = secs - pstore->creation;
4362                         if (pstore->expire > secs) {
4363                                 pstore->expire -= secs;
4368                         /* adjust the connection rate estimate */ 
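                              /*
                               * Added note, not in the original source: the
                               * branch below scales the exported estimate
                               * linearly with the age of the last sample.
                               * For example, with conn_rate.seconds = 10,
                               * conn_rate.count = 30 and diff = 4, the count
                               * shrinks by 30 * 4 / 10 = 12; once diff reaches
                               * the full 10-second window it is simply reset
                               * to 0.
                               */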
4369                         diff = secs - n->conn_rate.last;
4370                         if (diff >= n->conn_rate.seconds) {
4371                                 pstore->conn_rate.count = 0;
4373                                 pstore->conn_rate.count -=
4374                                     n->conn_rate.count * diff /
4375                                     n->conn_rate.seconds;
4378                         _RB_PARENT(pstore, entry) = NULL;
4379                         RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL;
4382                         error = copyout(pstore, buf, sizeof(*pstore));
4384                                 _FREE(pstore, M_TEMP);
4387                         buf += sizeof(*pstore);
4391                 size = sizeof(struct pf_src_node) * nr;
4393                         psn64->psn_len = size;
4395                         psn32->psn_len = size;
4398                 _FREE(pstore, M_TEMP);
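/*
 * Illustrative userland sketch of the DIOCGETSRCNODES size-probe pattern
 * handled above; it is not part of pf_ioctl.c.  It assumes the userland
 * struct pfioc_src_nodes (psn_len/psn_buf) from <net/pfvar.h> and an open
 * /dev/pf descriptor.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <stdlib.h>
#include <string.h>

/* Fetch a snapshot of the source-tracking nodes; the caller frees *out. */
static int
get_src_nodes(int dev, struct pf_src_node **out, int *count)
{
	struct pfioc_src_nodes psn;

	*out = NULL;
	*count = 0;
	memset(&psn, 0, sizeof(psn));
	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1)	/* psn_len == 0: size probe */
		return (-1);
	if (psn.psn_len == 0)
		return (0);
	if ((psn.psn_buf = malloc(psn.psn_len)) == NULL)
		return (-1);
	if (ioctl(dev, DIOCGETSRCNODES, &psn) == -1) {	/* second call copies the nodes out */
		free(psn.psn_buf);
		return (-1);
	}
	*out = (struct pf_src_node *)(void *)psn.psn_buf;
	*count = (int)(psn.psn_len / sizeof(struct pf_src_node));
	return (0);
}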
4411 pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk,
4418         case DIOCKILLSRCNODES: {
4419                 struct pf_src_node      *sn;
4423                 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
4424                         if (PF_MATCHA(psnk->psnk_src.neg,
4425                             &psnk->psnk_src.addr.v.a.addr,
4426                             &psnk->psnk_src.addr.v.a.mask,
4427                             &sn->addr, sn->af) &&
4428                             PF_MATCHA(psnk->psnk_dst.neg,
4429                             &psnk->psnk_dst.addr.v.a.addr,
4430                             &psnk->psnk_dst.addr.v.a.mask,
4431                             &sn->raddr, sn->af)) {
4432                                 /* Handle state to src_node linkage */
4433                                 if (sn->states != 0) {
4434                                         RB_FOREACH(s, pf_state_tree_id,
4436                                                 if (s->src_node == sn) {
4439                                                 if (s->nat_src_node == sn) {
4440                                                         s->nat_src_node = NULL;
4451                         pf_purge_expired_src_nodes();
4454                 psnk->psnk_af = (sa_family_t)killed;
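/*
 * Illustrative userland sketch of DIOCKILLSRCNODES as handled above; it is
 * not part of pf_ioctl.c.  It assumes struct pfioc_src_node_kill (psnk_af,
 * psnk_src, psnk_dst) from <net/pfvar.h>.  Zeroed address/mask pairs match
 * every node under PF_MATCHA, and the kill count comes back overloaded in
 * psnk_af, as the code above shows.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <string.h>

/* Remove all source-tracking nodes; returns the number killed or -1. */
static int
kill_all_src_nodes(int dev)
{
	struct pfioc_src_node_kill psnk;

	memset(&psnk, 0, sizeof(psnk));		/* zero masks act as wildcards */
	if (ioctl(dev, DIOCKILLSRCNODES, &psnk) == -1)
		return (-1);
	return ((int)psnk.psnk_af);		/* killed count, echoed back here */
}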
4467 pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32,
4468     struct pfioc_iface_64 *io64, struct proc *p)
4470         int p64 = proc_is64bit(p);
4474         case DIOCIGETIFACES: {
4479                 buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer);
4480                 esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize);
4482                 buf = io32->pfiio_buffer;
4483                 esize = io32->pfiio_esize;
4486                 /* esize must be that of the user space version of pfi_kif */
4487                 if (esize != sizeof(struct pfi_uif)) {
4492                         io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4494                         io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4496                 error = pfi_get_ifaces(
4497                         p64 ? io64->pfiio_name : io32->pfiio_name, buf,
4498                         p64 ? &io64->pfiio_size : &io32->pfiio_size);
4502         case DIOCSETIFFLAG: {
4504                         io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4506                         io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4509                 error = pfi_set_flags(
4510                         p64 ? io64->pfiio_name : io32->pfiio_name,
4511                         p64 ? io64->pfiio_flags : io32->pfiio_flags);
4515         case DIOCCLRIFFLAG: {
4517                         io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0';
4519                         io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0';
4522                 error = pfi_clear_flags(
4523                         p64 ? io64->pfiio_name : io32->pfiio_name,
4524                         p64 ? io64->pfiio_flags : io32->pfiio_flags);
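/*
 * Illustrative userland sketch of the interface ioctls handled above; it is
 * not part of pf_ioctl.c.  It assumes the userland struct pfioc_iface plus
 * struct pfi_uif from <net/pfvar.h> (the check above requires pfiio_esize ==
 * sizeof(struct pfi_uif)), treats pfiio_size as an in/out element count, and
 * uses an open /dev/pf descriptor.  "en0" and PFI_IFLAG_SKIP mirror what
 * "set skip on en0" would do.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>
#include <stdio.h>
#include <string.h>

/* List up to 'max' PF interfaces, then mark "en0" with the skip flag. */
static int
show_and_skip(int dev, struct pfi_uif *ifs, int max)
{
	struct pfioc_iface io;
	int i;

	memset(&io, 0, sizeof(io));		/* empty pfiio_name: match all interfaces */
	io.pfiio_buffer = ifs;
	io.pfiio_esize = sizeof(*ifs);
	io.pfiio_size = max;
	if (ioctl(dev, DIOCIGETIFACES, &io) == -1)
		return (-1);
	for (i = 0; i < io.pfiio_size && i < max; i++)
		printf("%s\n", ifs[i].pfik_name);

	strlcpy(io.pfiio_name, "en0", sizeof(io.pfiio_name));
	io.pfiio_flags = PFI_IFLAG_SKIP;
	return (ioctl(dev, DIOCSETIFFLAG, &io));
}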
4537 pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp,
4538     unsigned int af, int input, struct ip_fw_args *fwa)
4541         struct mbuf *nextpkt;
4542         net_thread_marks_t marks;
4543         struct ifnet * pf_ifp = ifp;
4545         /* Always allow traffic on co-processor interfaces. */
4546         if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) {
4550         marks = net_thread_marks_push(NET_THREAD_HELD_PF);
4552         if (marks != net_thread_marks_none) {
4553                 lck_rw_lock_shared(pf_perim_lock);
4554                 if (!pf_is_enabled) {
4557                 lck_mtx_lock(pf_lock);
4560         if (mppn != NULL && *mppn != NULL) {
4561                 VERIFY(*mppn == *mp);
4563         if ((nextpkt = (*mp)->m_nextpkt) != NULL) {
4564                 (*mp)->m_nextpkt = NULL;
4568          * For packets destined to locally hosted IP address 
4569          * ip_output_list sets Mbuf's pkt header's rcvif to 
4570          * the interface hosting the IP address. 
4571          * While on the output path ifp passed to pf_af_hook 
4572          * to such local communication is the loopback interface, 
4573          * the input path derives ifp from mbuf packet header's
4574          * rcvif.
4575          * This asymmetry causes issues with PF.
4576          * To handle that case, we have a limited change here to 
4577          * pass interface as loopback if packets are looped in. 
4579         if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) {
4586                 error = pf_inet_hook(pf_ifp, mp, input, fwa);
4591                 error = pf_inet6_hook(pf_ifp, mp, input, fwa);
4597         /* When the packet is valid, link it to the next packet */
4598         if (*mp != NULL && nextpkt != NULL) {
4599                 struct mbuf *m = *mp;
4600                 while (m->m_nextpkt != NULL) {
4603                 m->m_nextpkt = nextpkt;
4605         /* Fix up linkage of previous packet in the chain */ 
4614         if (marks != net_thread_marks_none) {
4615                 lck_mtx_unlock(pf_lock);
4619         if (marks != net_thread_marks_none) {
4620                 lck_rw_done(pf_perim_lock);
4623         net_thread_marks_pop(marks);
4630 pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4631     struct ip_fw_args *fwa)
4633         struct mbuf *m = *mp;
4634 #if BYTE_ORDER != BIG_ENDIAN
4635         struct ip *ip = mtod(m, struct ip *);
4640          * If the packet is outbound, is originated locally, is flagged for 
4641          * delayed UDP/TCP checksum calculation, and is about to be processed 
4642          * for an interface that doesn't support the appropriate checksum 
4643          * offloading, then calculate the checksum here so that PF can adjust
4646         if (!input && m->m_pkthdr.rcvif == NULL) {
4647                 static const int mask = CSUM_DELAY_DATA;
4648                 const int flags = m->m_pkthdr.csum_flags &
4649                     ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4652                         in_delayed_cksum(m);
4653                         m->m_pkthdr.csum_flags &= ~mask;
4657 #if BYTE_ORDER != BIG_ENDIAN
4661         if (pf_test_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4665                         error = EHOSTUNREACH;
4670 #if BYTE_ORDER != BIG_ENDIAN
4673                         ip = mtod(*mp, struct ip *);
4684 pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input,
4685     struct ip_fw_args *fwa)
4690          * If the packet is outbound, is originated locally, is flagged for 
4691          * delayed UDP/TCP checksum calculation, and is about to be processed 
4692          * for an interface that doesn't support the appropriate checksum 
4693          * offloading, then calculate the checksum here so that PF can adjust
4696         if (!input && (*mp)->m_pkthdr.rcvif == NULL) {
4697                 static const int mask = CSUM_DELAY_IPV6_DATA;
4698                 const int flags = (*mp)->m_pkthdr.csum_flags &
4699                     ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
4703                          * Checksum offload should not have been enabled 
4704                          * when extension headers exist, thus 0 for optlen. 
4706                         in6_delayed_cksum(*mp);
4707                         (*mp)->m_pkthdr.csum_flags &= ~mask;
4711         if (pf_test6_mbuf(input ? PF_IN : PF_OUT, ifp, mp, NULL, fwa) != PF_PASS) {
4715                         error = EHOSTUNREACH;
4724 pf_ifaddr_hook(struct ifnet *ifp)
4726         struct pfi_kif *kif = ifp->if_pf_kif;
4729                 lck_rw_lock_shared(pf_perim_lock);
4730                 lck_mtx_lock(pf_lock);
4732                 pfi_kifaddr_update(kif);
4734                 lck_mtx_unlock(pf_lock);
4735                 lck_rw_done(pf_perim_lock);
4741  * Caller acquires dlil lock as writer (exclusive) 
4744 pf_ifnet_hook(struct ifnet *ifp, int attach)
4746         lck_rw_lock_shared(pf_perim_lock);
4747         lck_mtx_lock(pf_lock);
4749                 pfi_attach_ifnet(ifp);
4751                 pfi_detach_ifnet(ifp);
4753         lck_mtx_unlock(pf_lock);
4754         lck_rw_done(pf_perim_lock);
4758 pf_attach_hooks(void) 
4760         ifnet_head_lock_shared(); 
4762          * Check against ifnet_addrs[] before proceeding, in case this 
4763          * is called very early on, e.g. during dlil_init() before any 
4764          * network interface is attached. 
4766         if (ifnet_addrs != NULL) {
4769                 for (i = 0; i <= if_index; i++) {
4770                         struct ifnet *ifp = ifindex2ifnet[i];
4772                                 pfi_attach_ifnet(ifp);
4780 /* currently unused along with pfdetach() */ 
4782 pf_detach_hooks(void) 
4784         ifnet_head_lock_shared(); 
4785         if (ifnet_addrs != NULL) {
4786                 for (i = 0; i <= if_index; i++) {
4789                         struct ifnet *ifp = ifindex2ifnet[i];
4790                         if (ifp != NULL && ifp->if_pf_kif != NULL) {
4791                                 pfi_detach_ifnet(ifp);
4802  * The switch statement below does nothing at runtime, as it serves as a
4803  * compile time check to ensure that all of the socket 'D' ioctls (those
4804  * in the 'D' group going thru soo_ioctl) that are made available by the
4805  * networking stack are unique.  This works as long as this routine gets
4806  * updated each time a new interface ioctl gets added.
4808  * Any failure at compile time indicates duplicated ioctl values.
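/*
 * Illustrative standalone sketch of the same compile-time trick; it is not
 * part of pf_ioctl.c.  Case labels must be distinct integer constants, so two
 * ioctls that encode to the same value make the switch fail to compile.  The
 * _IOW() encodings below are made up for illustration.
 */
#include <sys/ioccom.h>

static __attribute__((unused)) void
example_ioctl_cassert(void)
{
	switch ((unsigned long)0) {
	case _IOW('D', 60, int):
	case _IOW('D', 61, int):
	/* case _IOW('D', 60, int): */	/* a duplicated value would break the build here */
	default:
		break;
	}
}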
4810 static __attribute__((unused)) void
4811 pfioctl_cassert(void)
4814          * This is equivalent to _CASSERT() and the compiler wouldn't 
4815          * generate any instructions, thus for compile time only. 
4817         switch ((u_long)0) {
4820         /* bsd/net/pfvar.h */ 
4824         case DIOCGETSTARTERS:
4831         case DIOCSETSTATUSIF:
4837         case DIOCCHANGERULE:
4838         case DIOCINSERTRULE:
4839         case DIOCDELETERULE:
4840         case DIOCSETTIMEOUT:
4841         case DIOCGETTIMEOUT:
4843         case DIOCCLRRULECTRS:
4846         case DIOCKILLSTATES:
4852         case DIOCCHANGEALTQ:
4854         case DIOCBEGINADDRS:
4858         case DIOCCHANGEADDR:
4859         case DIOCGETRULESETS:
4860         case DIOCGETRULESET:
4861         case DIOCRCLRTABLES:
4862         case DIOCRADDTABLES:
4863         case DIOCRDELTABLES:
4864         case DIOCRGETTABLES:
4865         case DIOCRGETTSTATS:
4866         case DIOCRCLRTSTATS:
4872         case DIOCRGETASTATS:
4873         case DIOCRCLRASTATS:
4875         case DIOCRSETTFLAGS:
4876         case DIOCRINADEFINE:
4883         case DIOCGETSRCNODES:
4884         case DIOCCLRSRCNODES:
4886         case DIOCIGETIFACES:
4889         case DIOCKILLSRCNODES: