+ strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
+ break;
+ }
+
+ case DIOCCLRSTATUS: {
+ bzero(pf_status.counters, sizeof (pf_status.counters));
+ bzero(pf_status.fcounters, sizeof (pf_status.fcounters));
+ bzero(pf_status.scounters, sizeof (pf_status.scounters));
+ pf_status.since = pf_calendar_time_second();
+ if (*pf_status.ifname)
+ pfi_update_status(pf_status.ifname, NULL);
+ break;
+ }
+
+ case DIOCNATLOOK: { /* struct pfioc_natlook */
+ struct pfioc_natlook *pnl = NULL;
+
+ PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;);
+ error = pfioctl_ioc_natlook(cmd, pnl, p);
+ PFIOC_STRUCT_END(pnl, addr);
+ break;
+ }
+
+ case DIOCSETTIMEOUT: /* struct pfioc_tm */
+ case DIOCGETTIMEOUT: { /* struct pfioc_tm */
+ struct pfioc_tm pt;
+
+ /* small enough to be on stack */
+ bcopy(addr, &pt, sizeof (pt));
+ error = pfioctl_ioc_tm(cmd, &pt, p);
+ bcopy(&pt, addr, sizeof (pt));
+ break;
+ }
+
+ case DIOCGETLIMIT: /* struct pfioc_limit */
+ case DIOCSETLIMIT: { /* struct pfioc_limit */
+ struct pfioc_limit pl;
+
+ /* small enough to be on stack */
+ bcopy(addr, &pl, sizeof (pl));
+ error = pfioctl_ioc_limit(cmd, &pl, p);
+ bcopy(&pl, addr, sizeof (pl));
+ break;
+ }
+
+ case DIOCSETDEBUG: { /* u_int32_t */
+ bcopy(addr, &pf_status.debug, sizeof (u_int32_t));
+ break;
+ }
+
+ case DIOCCLRRULECTRS: {
+ /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
+ struct pf_ruleset *ruleset = &pf_main_ruleset;
+ struct pf_rule *rule;
+
+ TAILQ_FOREACH(rule,
+ ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
+ rule->evaluations = 0;
+ rule->packets[0] = rule->packets[1] = 0;
+ rule->bytes[0] = rule->bytes[1] = 0;
+ }
+ break;
+ }
+
+ case DIOCGIFSPEED: {
+ struct pf_ifspeed *psp = (struct pf_ifspeed *)(void *)addr;
+ struct pf_ifspeed ps;
+ struct ifnet *ifp;
+ u_int64_t baudrate;
+
+ if (psp->ifname[0] != '\0') {
+ /* Can we completely trust user-land? */
+ strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
+ ps.ifname[IFNAMSIZ - 1] = '\0';
+ ifp = ifunit(ps.ifname);
+ if (ifp != NULL) {
+ baudrate = ifp->if_output_bw.max_bw;
+ bcopy(&baudrate, &psp->baudrate,
+ sizeof (baudrate));
+ } else {
+ error = EINVAL;
+ }
+ } else {
+ error = EINVAL;
+ }
+ break;
+ }
+
+#if PF_ALTQ
+ case DIOCSTARTALTQ: {
+ struct pf_altq *altq;
+
+ VERIFY(altq_allowed);
+ /* enable all altq interfaces on active list */
+ TAILQ_FOREACH(altq, pf_altqs_active, entries) {
+ if (altq->qname[0] == '\0') {
+ error = pf_enable_altq(altq);
+ if (error != 0)
+ break;
+ }
+ }
+ if (error == 0)
+ pf_altq_running = 1;
+ DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
+ break;
+ }
+
+ case DIOCSTOPALTQ: {
+ struct pf_altq *altq;
+
+ VERIFY(altq_allowed);
+ /* disable all altq interfaces on active list */
+ TAILQ_FOREACH(altq, pf_altqs_active, entries) {
+ if (altq->qname[0] == '\0') {
+ error = pf_disable_altq(altq);
+ if (error != 0)
+ break;
+ }
+ }
+ if (error == 0)
+ pf_altq_running = 0;
+ DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
+ break;
+ }
+
+ case DIOCADDALTQ: { /* struct pfioc_altq */
+ struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
+ struct pf_altq *altq, *a;
+ u_int32_t ticket;
+
+ VERIFY(altq_allowed);
+ bcopy(&pa->ticket, &ticket, sizeof (ticket));
+ if (ticket != ticket_altqs_inactive) {
+ error = EBUSY;
+ break;
+ }
+ altq = pool_get(&pf_altq_pl, PR_WAITOK);
+ if (altq == NULL) {
+ error = ENOMEM;
+ break;
+ }
+ pf_altq_copyin(&pa->altq, altq);
+
+ /*
+ * if this is for a queue, find the discipline and
+ * copy the necessary fields
+ */
+ if (altq->qname[0] != '\0') {
+ if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
+ error = EBUSY;
+ pool_put(&pf_altq_pl, altq);
+ break;
+ }
+ altq->altq_disc = NULL;
+ TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
+ if (strncmp(a->ifname, altq->ifname,
+ IFNAMSIZ) == 0 && a->qname[0] == '\0') {
+ altq->altq_disc = a->altq_disc;
+ break;
+ }
+ }
+ }
+
+ error = altq_add(altq);
+ if (error) {
+ pool_put(&pf_altq_pl, altq);
+ break;
+ }
+
+ TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
+ pf_altq_copyout(altq, &pa->altq);
+ break;
+ }
+
+ case DIOCGETALTQS: {
+ struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
+ struct pf_altq *altq;
+ u_int32_t nr;
+
+ VERIFY(altq_allowed);
+ nr = 0;
+ TAILQ_FOREACH(altq, pf_altqs_active, entries)
+ nr++;
+ bcopy(&nr, &pa->nr, sizeof (nr));
+ bcopy(&ticket_altqs_active, &pa->ticket, sizeof (pa->ticket));
+ break;
+ }
+
+ case DIOCGETALTQ: {
+ struct pfioc_altq *pa = (struct pfioc_altq *)(void *)addr;
+ struct pf_altq *altq;
+ u_int32_t nr, pa_nr, ticket;
+
+ VERIFY(altq_allowed);
+ bcopy(&pa->ticket, &ticket, sizeof (ticket));
+ if (ticket != ticket_altqs_active) {
+ error = EBUSY;
+ break;
+ }
+ bcopy(&pa->nr, &pa_nr, sizeof (pa_nr));
+ nr = 0;
+ altq = TAILQ_FIRST(pf_altqs_active);
+ while ((altq != NULL) && (nr < pa_nr)) {
+ altq = TAILQ_NEXT(altq, entries);
+ nr++;
+ }
+ if (altq == NULL) {
+ error = EBUSY;
+ break;
+ }
+ pf_altq_copyout(altq, &pa->altq);
+ break;
+ }
+
+ case DIOCCHANGEALTQ:
+ VERIFY(altq_allowed);
+ /* CHANGEALTQ not supported yet! */
+ error = ENODEV;
+ break;
+
+ case DIOCGETQSTATS: {
+ struct pfioc_qstats *pq = (struct pfioc_qstats *)(void *)addr;
+ struct pf_altq *altq;
+ u_int32_t nr, pq_nr, ticket;
+ int nbytes;
+
+ VERIFY(altq_allowed);
+ bcopy(&pq->ticket, &ticket, sizeof (ticket));
+ if (ticket != ticket_altqs_active) {
+ error = EBUSY;
+ break;
+ }
+ bcopy(&pq->nr, &pq_nr, sizeof (pq_nr));
+ nr = 0;
+ altq = TAILQ_FIRST(pf_altqs_active);
+ while ((altq != NULL) && (nr < pq_nr)) {
+ altq = TAILQ_NEXT(altq, entries);
+ nr++;
+ }
+ if (altq == NULL) {
+ error = EBUSY;
+ break;
+ }
+ bcopy(&pq->nbytes, &nbytes, sizeof (nbytes));
+ error = altq_getqstats(altq, pq->buf, &nbytes);
+ if (error == 0) {
+ pq->scheduler = altq->scheduler;
+ bcopy(&nbytes, &pq->nbytes, sizeof (nbytes));
+ }
+ break;
+ }
+#endif /* PF_ALTQ */
+
+ case DIOCBEGINADDRS: /* struct pfioc_pooladdr */
+ case DIOCADDADDR: /* struct pfioc_pooladdr */
+ case DIOCGETADDRS: /* struct pfioc_pooladdr */
+ case DIOCGETADDR: /* struct pfioc_pooladdr */
+ case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */
+ struct pfioc_pooladdr *pp = NULL;
+
+ PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;)
+ error = pfioctl_ioc_pooladdr(cmd, pp, p);
+ PFIOC_STRUCT_END(pp, addr);
+ break;
+ }
+
+ case DIOCGETRULESETS: /* struct pfioc_ruleset */
+ case DIOCGETRULESET: { /* struct pfioc_ruleset */
+ struct pfioc_ruleset *pr = NULL;
+
+ PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;);
+ error = pfioctl_ioc_ruleset(cmd, pr, p);
+ PFIOC_STRUCT_END(pr, addr);
+ break;
+ }
+
+ case DIOCRCLRTABLES: /* struct pfioc_table */
+ case DIOCRADDTABLES: /* struct pfioc_table */
+ case DIOCRDELTABLES: /* struct pfioc_table */
+ case DIOCRGETTABLES: /* struct pfioc_table */
+ case DIOCRGETTSTATS: /* struct pfioc_table */
+ case DIOCRCLRTSTATS: /* struct pfioc_table */
+ case DIOCRSETTFLAGS: /* struct pfioc_table */
+ case DIOCRCLRADDRS: /* struct pfioc_table */
+ case DIOCRADDADDRS: /* struct pfioc_table */
+ case DIOCRDELADDRS: /* struct pfioc_table */
+ case DIOCRSETADDRS: /* struct pfioc_table */
+ case DIOCRGETADDRS: /* struct pfioc_table */
+ case DIOCRGETASTATS: /* struct pfioc_table */
+ case DIOCRCLRASTATS: /* struct pfioc_table */
+ case DIOCRTSTADDRS: /* struct pfioc_table */
+ case DIOCRINADEFINE: { /* struct pfioc_table */
+ PFIOCX_STRUCT_DECL(pfioc_table);
+
+ PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;);
+ error = pfioctl_ioc_table(cmd,
+ PFIOCX_STRUCT_ADDR32(pfioc_table),
+ PFIOCX_STRUCT_ADDR64(pfioc_table), p);
+ PFIOCX_STRUCT_END(pfioc_table, addr);
+ break;
+ }
+
+ case DIOCOSFPADD: /* struct pf_osfp_ioctl */
+ case DIOCOSFPGET: { /* struct pf_osfp_ioctl */
+ struct pf_osfp_ioctl *io = NULL;
+
+ PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;);
+ if (cmd == DIOCOSFPADD) {
+ error = pf_osfp_add(io);
+ } else {
+ VERIFY(cmd == DIOCOSFPGET);
+ error = pf_osfp_get(io);
+ }
+ PFIOC_STRUCT_END(io, addr);
+ break;
+ }
+
+ case DIOCXBEGIN: /* struct pfioc_trans */
+ case DIOCXROLLBACK: /* struct pfioc_trans */
+ case DIOCXCOMMIT: { /* struct pfioc_trans */
+ PFIOCX_STRUCT_DECL(pfioc_trans);
+
+ PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;);
+ error = pfioctl_ioc_trans(cmd,
+ PFIOCX_STRUCT_ADDR32(pfioc_trans),
+ PFIOCX_STRUCT_ADDR64(pfioc_trans), p);
+ PFIOCX_STRUCT_END(pfioc_trans, addr);
+ break;
+ }
+
+ case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */
+ PFIOCX_STRUCT_DECL(pfioc_src_nodes);
+
+ PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes,
+ error = ENOMEM; break;);
+ error = pfioctl_ioc_src_nodes(cmd,
+ PFIOCX_STRUCT_ADDR32(pfioc_src_nodes),
+ PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p);
+ PFIOCX_STRUCT_END(pfioc_src_nodes, addr);
+ break;
+ }
+
+ case DIOCCLRSRCNODES: {
+ struct pf_src_node *n;
+ struct pf_state *state;
+
+ RB_FOREACH(state, pf_state_tree_id, &tree_id) {
+ state->src_node = NULL;
+ state->nat_src_node = NULL;
+ }
+ RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
+ n->expire = 1;
+ n->states = 0;
+ }
+ pf_purge_expired_src_nodes();
+ pf_status.src_nodes = 0;
+ break;
+ }
+
+ case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */
+ struct pfioc_src_node_kill *psnk = NULL;
+
+ PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;);
+ error = pfioctl_ioc_src_node_kill(cmd, psnk, p);
+ PFIOC_STRUCT_END(psnk, addr);
+ break;
+ }
+
+ case DIOCSETHOSTID: { /* u_int32_t */
+ u_int32_t hid;
+
+ /* small enough to be on stack */
+ bcopy(addr, &hid, sizeof (hid));
+ if (hid == 0)
+ pf_status.hostid = random();
+ else
+ pf_status.hostid = hid;
+ break;
+ }
+
+ case DIOCOSFPFLUSH:
+ pf_osfp_flush();
+ break;
+
+ case DIOCIGETIFACES: /* struct pfioc_iface */
+ case DIOCSETIFFLAG: /* struct pfioc_iface */
+ case DIOCCLRIFFLAG: { /* struct pfioc_iface */
+ PFIOCX_STRUCT_DECL(pfioc_iface);
+
+ PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;);
+ error = pfioctl_ioc_iface(cmd,
+ PFIOCX_STRUCT_ADDR32(pfioc_iface),
+ PFIOCX_STRUCT_ADDR64(pfioc_iface), p);
+ PFIOCX_STRUCT_END(pfioc_iface, addr);
+ break;
+ }
+
+ default:
+ error = ENODEV;
+ break;
+ }
+
+ lck_mtx_unlock(pf_lock);
+ lck_rw_done(pf_perim_lock);
+
+ return (error);
+}
+
+/*
+ * Handle the DIOCR* radix-table ioctls (table and table-address
+ * manipulation).  The caller has already copied the user request into
+ * either the 32-bit (io32) or the 64-bit (io64) pfioc_table image;
+ * proc_is64bit(p) decides which of the two is valid, and the two
+ * switch statements below are field-for-field mirrors of each other.
+ *
+ * Every command first validates pfrio_esize -- the element size the
+ * caller was compiled with -- against the kernel's structure size and
+ * fails with ENODEV on mismatch (guards against user/kernel ABI skew).
+ * Commands that take an embedded table argument sanitize it with
+ * pfr_table_copyin_cleanup() before use.  PFR_FLAG_USERIOCTL marks the
+ * operation as user-initiated for the pfr_* backend.
+ * NOTE(review): pfrio_buffer is presumably a user-space pointer that
+ * the pfr_* routines copy in/out themselves -- confirm in pfvar.h.
+ */
+static int
+pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32,
+    struct pfioc_table_64 *io64, struct proc *p)
+{
+	int p64 = proc_is64bit(p);
+	int error = 0;
+
+	if (!p64)
+		goto struct32;
+
+	/*
+	 * 64-bit structure processing
+	 */
+	switch (cmd) {
+	case DIOCRCLRTABLES:
+		if (io64->pfrio_esize != 0) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_clr_tables(&io64->pfrio_table, &io64->pfrio_ndel,
+		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRADDTABLES:
+		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_add_tables(io64->pfrio_buffer, io64->pfrio_size,
+		    &io64->pfrio_nadd, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRDELTABLES:
+		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_del_tables(io64->pfrio_buffer, io64->pfrio_size,
+		    &io64->pfrio_ndel, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRGETTABLES:
+		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_get_tables(&io64->pfrio_table, io64->pfrio_buffer,
+		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRGETTSTATS:
+		if (io64->pfrio_esize != sizeof (struct pfr_tstats)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_get_tstats(&io64->pfrio_table, io64->pfrio_buffer,
+		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRCLRTSTATS:
+		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_clr_tstats(io64->pfrio_buffer, io64->pfrio_size,
+		    &io64->pfrio_nzero, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRSETTFLAGS:
+		if (io64->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_set_tflags(io64->pfrio_buffer, io64->pfrio_size,
+		    io64->pfrio_setflag, io64->pfrio_clrflag,
+		    &io64->pfrio_nchange, &io64->pfrio_ndel,
+		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRCLRADDRS:
+		if (io64->pfrio_esize != 0) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_clr_addrs(&io64->pfrio_table, &io64->pfrio_ndel,
+		    io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRADDADDRS:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_add_addrs(&io64->pfrio_table, io64->pfrio_buffer,
+		    io64->pfrio_size, &io64->pfrio_nadd, io64->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRDELADDRS:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_del_addrs(&io64->pfrio_table, io64->pfrio_buffer,
+		    io64->pfrio_size, &io64->pfrio_ndel, io64->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRSETADDRS:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_set_addrs(&io64->pfrio_table, io64->pfrio_buffer,
+		    io64->pfrio_size, &io64->pfrio_size2, &io64->pfrio_nadd,
+		    &io64->pfrio_ndel, &io64->pfrio_nchange, io64->pfrio_flags |
+		    PFR_FLAG_USERIOCTL, 0);
+		break;
+
+	case DIOCRGETADDRS:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_get_addrs(&io64->pfrio_table, io64->pfrio_buffer,
+		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRGETASTATS:
+		if (io64->pfrio_esize != sizeof (struct pfr_astats)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_get_astats(&io64->pfrio_table, io64->pfrio_buffer,
+		    &io64->pfrio_size, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRCLRASTATS:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_clr_astats(&io64->pfrio_table, io64->pfrio_buffer,
+		    io64->pfrio_size, &io64->pfrio_nzero, io64->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRTSTADDRS:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_tst_addrs(&io64->pfrio_table, io64->pfrio_buffer,
+		    io64->pfrio_size, &io64->pfrio_nmatch, io64->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRINADEFINE:
+		if (io64->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io64->pfrio_table);
+		error = pfr_ina_define(&io64->pfrio_table, io64->pfrio_buffer,
+		    io64->pfrio_size, &io64->pfrio_nadd, &io64->pfrio_naddr,
+		    io64->pfrio_ticket, io64->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	default:
+		/* caller (pfioctl) only routes DIOCR* commands here */
+		VERIFY(0);
+		/* NOTREACHED */
+	}
+	goto done;
+
+struct32:
+	/*
+	 * 32-bit structure processing
+	 * (identical command semantics; only the struct layout differs)
+	 */
+	switch (cmd) {
+	case DIOCRCLRTABLES:
+		if (io32->pfrio_esize != 0) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_clr_tables(&io32->pfrio_table, &io32->pfrio_ndel,
+		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRADDTABLES:
+		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_add_tables(io32->pfrio_buffer, io32->pfrio_size,
+		    &io32->pfrio_nadd, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRDELTABLES:
+		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_del_tables(io32->pfrio_buffer, io32->pfrio_size,
+		    &io32->pfrio_ndel, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRGETTABLES:
+		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_get_tables(&io32->pfrio_table, io32->pfrio_buffer,
+		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRGETTSTATS:
+		if (io32->pfrio_esize != sizeof (struct pfr_tstats)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_get_tstats(&io32->pfrio_table, io32->pfrio_buffer,
+		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRCLRTSTATS:
+		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_clr_tstats(io32->pfrio_buffer, io32->pfrio_size,
+		    &io32->pfrio_nzero, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRSETTFLAGS:
+		if (io32->pfrio_esize != sizeof (struct pfr_table)) {
+			error = ENODEV;
+			break;
+		}
+		error = pfr_set_tflags(io32->pfrio_buffer, io32->pfrio_size,
+		    io32->pfrio_setflag, io32->pfrio_clrflag,
+		    &io32->pfrio_nchange, &io32->pfrio_ndel,
+		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRCLRADDRS:
+		if (io32->pfrio_esize != 0) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_clr_addrs(&io32->pfrio_table, &io32->pfrio_ndel,
+		    io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRADDADDRS:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_add_addrs(&io32->pfrio_table, io32->pfrio_buffer,
+		    io32->pfrio_size, &io32->pfrio_nadd, io32->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRDELADDRS:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_del_addrs(&io32->pfrio_table, io32->pfrio_buffer,
+		    io32->pfrio_size, &io32->pfrio_ndel, io32->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRSETADDRS:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_set_addrs(&io32->pfrio_table, io32->pfrio_buffer,
+		    io32->pfrio_size, &io32->pfrio_size2, &io32->pfrio_nadd,
+		    &io32->pfrio_ndel, &io32->pfrio_nchange, io32->pfrio_flags |
+		    PFR_FLAG_USERIOCTL, 0);
+		break;
+
+	case DIOCRGETADDRS:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_get_addrs(&io32->pfrio_table, io32->pfrio_buffer,
+		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRGETASTATS:
+		if (io32->pfrio_esize != sizeof (struct pfr_astats)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_get_astats(&io32->pfrio_table, io32->pfrio_buffer,
+		    &io32->pfrio_size, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRCLRASTATS:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_clr_astats(&io32->pfrio_table, io32->pfrio_buffer,
+		    io32->pfrio_size, &io32->pfrio_nzero, io32->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRTSTADDRS:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_tst_addrs(&io32->pfrio_table, io32->pfrio_buffer,
+		    io32->pfrio_size, &io32->pfrio_nmatch, io32->pfrio_flags |
+		    PFR_FLAG_USERIOCTL);
+		break;
+
+	case DIOCRINADEFINE:
+		if (io32->pfrio_esize != sizeof (struct pfr_addr)) {
+			error = ENODEV;
+			break;
+		}
+		pfr_table_copyin_cleanup(&io32->pfrio_table);
+		error = pfr_ina_define(&io32->pfrio_table, io32->pfrio_buffer,
+		    io32->pfrio_size, &io32->pfrio_nadd, &io32->pfrio_naddr,
+		    io32->pfrio_ticket, io32->pfrio_flags | PFR_FLAG_USERIOCTL);
+		break;
+
+	default:
+		/* caller (pfioctl) only routes DIOCR* commands here */
+		VERIFY(0);
+		/* NOTREACHED */
+	}
+
+done:
+	return (error);
+}
+
+/*
+ * Handle DIOCGETSTARTERS: report the list of "starter" tokens (the
+ * grant tokens handed out to processes that enabled pf) back to user
+ * space.  Two-phase protocol: a call with size == 0 is a probe that
+ * only returns the number of bytes required; a call with a non-zero
+ * size copies out as many whole pfioc_token records as fit and then
+ * reports the number of bytes actually written back in "size".
+ * Returns 0 on success, ENOENT if no tokens exist, ENOMEM if the
+ * staging buffer cannot be allocated, or a copyout(9) error.
+ */
+static int
+pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32,
+    struct pfioc_tokens_64 *tok64, struct proc *p)
+{
+	struct pfioc_token *tokens;
+	struct pfioc_kernel_token *entry, *tmp;
+	user_addr_t token_buf;
+	int ocnt, cnt, error = 0, p64 = proc_is64bit(p);
+	char *ptr;
+
+	switch (cmd) {
+	case DIOCGETSTARTERS: {
+		int size;
+
+		if (nr_tokens == 0) {
+			error = ENOENT;
+			break;
+		}
+
+		size = sizeof (struct pfioc_token) * nr_tokens;
+		/* ocnt remembers the caller's buffer size; cnt counts down */
+		ocnt = cnt = (p64 ? tok64->size : tok32->size);
+		if (cnt == 0) {
+			/* size probe: tell the caller how much space to pass */
+			if (p64)
+				tok64->size = size;
+			else
+				tok32->size = size;
+			break;
+		}
+
+		token_buf = (p64 ? tok64->pgt_buf : tok32->pgt_buf);
+		tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO);
+		if (tokens == NULL) {
+			error = ENOMEM;
+			break;
+		}
+
+		/* stage the kernel token list into the temporary buffer */
+		ptr = (void *)tokens;
+		SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) {
+			struct pfioc_token *t;
+
+			if ((unsigned)cnt < sizeof (*tokens))
+				break;	/* no more buffer space left */
+
+			t = (struct pfioc_token *)(void *)ptr;
+			t->token_value = entry->token.token_value;
+			t->timestamp = entry->token.timestamp;
+			t->pid = entry->token.pid;
+			bcopy(entry->token.proc_name, t->proc_name,
+			    PFTOK_PROCNAME_LEN);
+			ptr += sizeof (struct pfioc_token);
+
+			cnt -= sizeof (struct pfioc_token);
+		}
+
+		/* ocnt - cnt == number of bytes actually staged */
+		if (cnt < ocnt)
+			error = copyout(tokens, token_buf, ocnt - cnt);
+
+		if (p64)
+			tok64->size = ocnt - cnt;
+		else
+			tok32->size = ocnt - cnt;
+
+		_FREE(tokens, M_TEMP);
+		break;
+	}
+
+	default:
+		/* caller (pfioctl) only routes DIOCGETSTARTERS here */
+		VERIFY(0);
+		/* NOTREACHED */
+	}
+
+	return (error);
+}
+
+/*
+ * Force every state and source node created by "rule" to expire, then
+ * purge them, so that nothing keeps a live reference to a rule that is
+ * about to be deleted.
+ */
+static void
+pf_expire_states_and_src_nodes(struct pf_rule *rule)
+{
+	struct pf_state *st;
+	struct pf_src_node *node;
+	int expired_nodes = 0;
+
+	/* mark every state bound to this rule for immediate purge */
+	TAILQ_FOREACH(st, &state_list, entry_list) {
+		if (st->rule.ptr == rule)
+			st->timeout = PFTM_PURGE;
+	}
+	pf_purge_expired_states(pf_status.states);
+
+	/* detach and expire the source nodes owned by this rule */
+	RB_FOREACH(node, pf_src_tree, &tree_src_tracking) {
+		if (node->rule.ptr != rule)
+			continue;
+		if (node->states != 0) {
+			/* drop every state's back-pointer to this node */
+			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
+				if (st->src_node == node)
+					st->src_node = NULL;
+				if (st->nat_src_node == node)
+					st->nat_src_node = NULL;
+			}
+			node->states = 0;
+		}
+		node->expire = 1;
+		expired_nodes++;
+	}
+	if (expired_nodes != 0)
+		pf_purge_expired_src_nodes();
+}
+
+/*
+ * Remove "rule" from the active list of ruleset->rules[rs_num] -- after
+ * expiring all states and source nodes it created -- and renumber the
+ * surviving rules so the sequence stays dense and ordered.
+ */
+static void
+pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num,
+    struct pf_rule *rule)
+{
+	struct pf_rule *cur;
+	int seq = 0;
+
+	pf_expire_states_and_src_nodes(rule);
+
+	pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule);
+	/* post-decrement check: the count must not already have been zero */
+	if (ruleset->rules[rs_num].active.rcount-- == 0)
+		panic("%s: rcount value broken!", __func__);
+
+	/* hand out fresh sequential rule numbers to the survivors */
+	TAILQ_FOREACH(cur, ruleset->rules[rs_num].active.ptr, entries)
+		cur->nr = seq++;
+}
+
+
+/*
+ * Post-modification bookkeeping for one ruleset slot: recompute the
+ * skip steps over the active rules and advance both tickets so that
+ * stale tickets held by user space are invalidated.
+ */
+static void
+pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs)
+{
+	pf_calc_skip_steps(ruleset->rules[rs].active.ptr);
+	ruleset->rules[rs].inactive.ticket++;
+	ruleset->rules[rs].active.ticket = ruleset->rules[rs].inactive.ticket;
+}
+
+/*
+ * req_dev encodes the PF interface. Currently, possible values are
+ * 0 or PFRULE_PFM
+ */
+/*
+ * Delete the single rule identified by pr->rule.ticket from the ruleset
+ * named by pr->anchor, enforcing owner and requesting-device checks.
+ * If deleting the rule empties an ownerless sub-anchor, the anchor rule
+ * in the parent is deleted too, repeating up the tree (the delete_rule
+ * goto loop).  Returns 0 on success, ENOENT if no rule carries the
+ * ticket, EACCES on owner/device mismatch, or a lookup error.
+ */
+static int
+pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev)
+{
+	struct pf_ruleset	*ruleset;
+	struct pf_rule		*rule = NULL;
+	int			 is_anchor;
+	int			 error;
+	int			 i;
+
+	is_anchor = (pr->anchor_call[0] != '\0');
+	if ((ruleset = pf_find_ruleset_with_owner(pr->anchor,
+	    pr->rule.owner, is_anchor, &error)) == NULL)
+		return (error);
+
+	/* scan every ruleset type for the rule carrying the user's ticket */
+	for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) {
+		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
+		while (rule && (rule->ticket != pr->rule.ticket))
+			rule = TAILQ_NEXT(rule, entries);
+	}
+	/* on a match the for loop still incremented i once; undo that */
+	if (rule == NULL)
+		return (ENOENT);
+	else
+		i--;
+
+	if (strcmp(rule->owner, pr->rule.owner))
+		return (EACCES);
+
+delete_rule:
+	/*
+	 * If this rule is an anchor rule in an ownerless, now-singleton
+	 * sub-ruleset, delete it and then repeat the deletion for the
+	 * anchor rule in the parent ruleset.
+	 */
+	if (rule->anchor && (ruleset != &pf_main_ruleset) &&
+	    ((strcmp(ruleset->anchor->owner, "")) == 0) &&
+	    ((ruleset->rules[i].active.rcount - 1) == 0)) {
+		/* set rule & ruleset to parent and repeat */
+		struct pf_rule *delete_rule = rule;
+		struct pf_ruleset *delete_ruleset = ruleset;
+
+#define parent_ruleset		ruleset->anchor->parent->ruleset
+		if (ruleset->anchor->parent == NULL)
+			ruleset = &pf_main_ruleset;
+		else
+			ruleset = &parent_ruleset;
+
+		/* find the anchor rule in the parent pointing at this ruleset */
+		rule = TAILQ_FIRST(ruleset->rules[i].active.ptr);
+		while (rule &&
+		    (rule->anchor != delete_ruleset->anchor))
+			rule = TAILQ_NEXT(rule, entries);
+		if (rule == NULL)
+			panic("%s: rule not found!", __func__);
+
+		/*
+		 * if reqest device != rule's device, bail :
+		 * with error if ticket matches;
+		 * without error if ticket doesn't match (i.e. its just cleanup)
+		 */
+		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
+			if (rule->ticket != pr->rule.ticket) {
+				return (0);
+			} else {
+				return EACCES;
+			}
+		}
+
+		if (delete_rule->rule_flag & PFRULE_PFM) {
+			pffwrules--;
+		}
+
+		pf_delete_rule_from_ruleset(delete_ruleset,
+		    i, delete_rule);
+		delete_ruleset->rules[i].active.ticket =
+		    ++delete_ruleset->rules[i].inactive.ticket;
+		goto delete_rule;
+	} else {
+		/*
+		 * process deleting rule only if device that added the
+		 * rule matches device that issued the request
+		 */
+		if ((rule->rule_flag & PFRULE_PFM) ^ req_dev)
+			return EACCES;
+		if (rule->rule_flag & PFRULE_PFM)
+			pffwrules--;
+		pf_delete_rule_from_ruleset(ruleset, i,
+		    rule);
+		pf_ruleset_cleanup(ruleset, i);
+	}
+
+	return (0);
+}
+
+/*
+ * req_dev encodes the PF interface. Currently, possible values are
+ * 0 or PFRULE_PFM
+ */
+/*
+ * Delete every rule whose owner string matches "owner" (and whose
+ * originating device matches req_dev), walking the main ruleset and
+ * descending into anchors.  Anchor rules that are ownerless or match
+ * the owner are either stepped into (if their sub-ruleset still has
+ * active rules) or deleted outright.  pf_ruleset_cleanup() is run on
+ * each ruleset we actually modified before leaving it.
+ */
+static void
+pf_delete_rule_by_owner(char *owner, u_int32_t req_dev)
+{
+	struct pf_ruleset	*ruleset;
+	struct pf_rule		*rule, *next;
+	int			 deleted = 0;
+
+	for (int rs = 0; rs < PF_RULESET_MAX; rs++) {
+		rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr);
+		ruleset = &pf_main_ruleset;
+		while (rule) {
+			next = TAILQ_NEXT(rule, entries);
+			/*
+			 * process deleting rule only if device that added the
+			 * rule matches device that issued the request
+			 */
+			if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) {
+				rule = next;
+				continue;
+			}
+			if (rule->anchor) {
+				if (((strcmp(rule->owner, owner)) == 0) ||
+				    ((strcmp(rule->owner, "")) == 0)) {
+					if (rule->anchor->ruleset.rules[rs].active.rcount > 0) {
+						/* finish the ruleset we are leaving */
+						if (deleted) {
+							pf_ruleset_cleanup(ruleset, rs);
+							deleted = 0;
+						}
+						/* step into anchor */
+						ruleset =
+						    &rule->anchor->ruleset;
+						rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
+						continue;
+					} else {
+						/* empty anchor: delete the anchor rule itself */
+						if (rule->rule_flag &
+						    PFRULE_PFM)
+							pffwrules--;
+						pf_delete_rule_from_ruleset(ruleset, rs, rule);
+						deleted = 1;
+						rule = next;
+					}
+				} else
+					rule = next;
+			} else {
+				if (((strcmp(rule->owner, owner)) == 0)) {
+					/* delete rule */
+					if (rule->rule_flag & PFRULE_PFM)
+						pffwrules--;
+					pf_delete_rule_from_ruleset(ruleset,
+					    rs, rule);
+					deleted = 1;
+				}
+				rule = next;
+			}
+			/*
+			 * reached the end of the current ruleset: clean it up
+			 * and, if we are inside an anchor, step back out to
+			 * the parent and resume after the anchor rule
+			 */
+			if (rule == NULL) {
+				if (deleted) {
+					pf_ruleset_cleanup(ruleset, rs);
+					deleted = 0;
+				}
+				if (ruleset != &pf_main_ruleset)
+					pf_deleterule_anchor_step_out(&ruleset,
+					    rs, &rule);
+			}
+		}
+	}
+}
+
+/*
+ * Step the caller's iteration out of the anchor it is currently in:
+ * reposition *ruleset_ptr on the parent ruleset and *rule_ptr on the
+ * anchor rule that owns the ruleset being left -- or on the rule after
+ * it when the anchor's sub-ruleset still holds active rules (so the
+ * caller does not revisit a non-empty anchor).  Panics if the parent
+ * no longer contains the owning anchor rule.
+ */
+static void
+pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr,
+    int rs, struct pf_rule **rule_ptr)
+{
+	struct pf_ruleset *ruleset = *ruleset_ptr;
+	struct pf_rule *rule = *rule_ptr;
+
+	/* step out of anchor */
+	struct pf_ruleset *rs_copy = ruleset;
+	ruleset = ruleset->anchor->parent?
+	    &ruleset->anchor->parent->ruleset:&pf_main_ruleset;
+
+	/* find the anchor rule in the parent that owns the old ruleset */
+	rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr);
+	while (rule && (rule->anchor != rs_copy->anchor))
+		rule = TAILQ_NEXT(rule, entries);
+	if (rule == NULL)
+		panic("%s: parent rule of anchor not found!", __func__);
+	/* skip past the anchor rule if its sub-ruleset is still populated */
+	if (rule->anchor->ruleset.rules[rs].active.rcount > 0)
+		rule = TAILQ_NEXT(rule, entries);
+
+	*ruleset_ptr = ruleset;
+	*rule_ptr = rule;
+}
+
+/*
+ * Reset the kernel-private pointer union of an address wrapper that was
+ * just copied in from user space, before it is (re)bound by the
+ * pfi_dynaddr_setup()/pf_tbladdr_setup() helpers.
+ */
+static void
+pf_addrwrap_setup(struct pf_addr_wrap *aw)
+{
+	VERIFY(aw != NULL);
+	bzero(&aw->p, sizeof (aw->p));
+}
+
+/*
+ * Validate and resolve a freshly copied-in rule before insertion:
+ * bind the interface kif, resolve queue/tag names to ids, set up the
+ * source/destination address wraps (dynamic addresses, tables, route
+ * labels), attach the anchor call and overload table, and move the
+ * staged pool-address list into the rule.  Errors accumulate across
+ * the checks; on any error the rule is freed here (pool_put for an
+ * unknown ifname, pf_rm_rule for everything later) and an errno is
+ * returned.  On success the pool cursor is primed and the counters
+ * are zeroed.  Returns 0 or EINVAL/EBUSY.
+ */
+static int
+pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule,
+    struct pf_ruleset *ruleset) {
+	struct pf_pooladdr	*apa;
+	int			 error = 0;
+
+	/* resolve and reference the interface, if the rule names one */
+	if (rule->ifname[0]) {
+		rule->kif = pfi_kif_get(rule->ifname);
+		if (rule->kif == NULL) {
+			pool_put(&pf_rule_pl, rule);
+			return (EINVAL);
+		}
+		pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE);
+	}
+#if PF_ALTQ
+	/* set queue IDs */
+	if (altq_allowed && rule->qname[0] != '\0') {
+		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
+			error = EBUSY;
+		else if (rule->pqname[0] != '\0') {
+			if ((rule->pqid =
+			    pf_qname2qid(rule->pqname)) == 0)
+				error = EBUSY;
+		} else
+			rule->pqid = rule->qid;
+	}
+#endif /* PF_ALTQ */
+	/* resolve tag names; EBUSY when the tag table is exhausted */
+	if (rule->tagname[0])
+		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
+			error = EBUSY;
+	if (rule->match_tagname[0])
+		if ((rule->match_tag =
+		    pf_tagname2tag(rule->match_tagname)) == 0)
+			error = EBUSY;
+	/* route-to options require an explicit direction */
+	if (rule->rt && !rule->direction)
+		error = EINVAL;
+#if PFLOG
+	if (!rule->log)
+		rule->logif = 0;
+	if (rule->logif >= PFLOGIFS_MAX)
+		error = EINVAL;
+#endif /* PFLOG */
+	/* clear kernel-private pointers before binding the address wraps */
+	pf_addrwrap_setup(&rule->src.addr);
+	pf_addrwrap_setup(&rule->dst.addr);
+	if (pf_rtlabel_add(&rule->src.addr) ||
+	    pf_rtlabel_add(&rule->dst.addr))
+		error = EBUSY;
+	if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
+		error = EINVAL;
+	if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
+		error = EINVAL;
+	if (pf_tbladdr_setup(ruleset, &rule->src.addr))
+		error = EINVAL;
+	if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
+		error = EINVAL;
+	if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
+		error = EINVAL;
+	/* the pool addresses staged by DIOCADDADDR may reference tables too */
+	TAILQ_FOREACH(apa, &pf_pabuf, entries)
+		if (pf_tbladdr_setup(ruleset, &apa->addr))
+			error = EINVAL;
+
+	if (rule->overload_tblname[0]) {
+		if ((rule->overload_tbl = pfr_attach_table(ruleset,
+		    rule->overload_tblname)) == NULL)
+			error = EINVAL;
+		else
+			rule->overload_tbl->pfrkt_flags |=
+			    PFR_TFLAG_ACTIVE;
+	}
+
+	pf_mv_pool(&pf_pabuf, &rule->rpool.list);
+	/* translation rules (and route-to beyond fastroute) need a pool */
+	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
+	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
+	    (rule->rt > PF_FASTROUTE)) &&
+	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
+		error = EINVAL;
+
+	if (error) {
+		/* pf_rm_rule releases everything resolved above */
+		pf_rm_rule(NULL, rule);
+		return (error);
+	}
+	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
+	rule->evaluations = rule->packets[0] = rule->packets[1] =
+	    rule->bytes[0] = rule->bytes[1] = 0;
+
+	return (0);
+}
+
+static int
+pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p)
+{
+ int error = 0;
+ u_int32_t req_dev = 0;
+
+ switch (cmd) {
+ case DIOCADDRULE: {
+ struct pf_ruleset *ruleset;
+ struct pf_rule *rule, *tail;
+ int rs_num;
+
+ pr->anchor[sizeof (pr->anchor) - 1] = '\0';
+ pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0';
+ ruleset = pf_find_ruleset(pr->anchor);
+ if (ruleset == NULL) {
+ error = EINVAL;
+ break;
+ }
+ rs_num = pf_get_ruleset_number(pr->rule.action);
+ if (rs_num >= PF_RULESET_MAX) {