1 /*
2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
7 * Reserved. This file contains Original Code and/or Modifications of
8 * Original Code as defined in and that are subject to the Apple Public
9 * Source License Version 1.0 (the 'License'). You may not use this file
10 * except in compliance with the License. Please obtain a copy of the
11 * License at http://www.apple.com/publicsource and read it before using
12 * this file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
19 * License for the specific language governing rights and limitations
20 * under the License."
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24 /* $OpenBSD: gencode.c,v 1.5 1996/09/16 02:33:05 tholo Exp $ */
25
26 /*
27 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996
28 * The Regents of the University of California. All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that: (1) source code distributions
32 * retain the above copyright notice and this paragraph in its entirety, (2)
33 * distributions including binary code include the above copyright notice and
34 * this paragraph in its entirety in the documentation or other materials
35 * provided with the distribution, and (3) all advertising materials mentioning
36 * features or use of this software display the following acknowledgement:
37 * ``This product includes software developed by the University of California,
38 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
39 * the University nor the names of its contributors may be used to endorse
40 * or promote products derived from this software without specific prior
41 * written permission.
42 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
43 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
45 */
46 #ifndef lint
47 static char rcsid[] =
48 "@(#) Header: gencode.c,v 1.81 96/06/19 23:09:09 leres Exp (LBL)";
49 #endif
50
51 #include <sys/types.h>
52 #include <sys/socket.h>
53 #include <sys/time.h>
54
55 #if __STDC__
56 struct mbuf;
57 struct rtentry;
58 #endif
59
60 #include <net/if.h>
61 #include <net/bpf.h>
62
63 #include <netinet/in.h>
64 #include <netinet/if_ether.h>
65
66 #include <stdlib.h>
67 #include <memory.h>
68 #include <pcap.h>
69 #include <pcap-namedb.h>
70 #include <setjmp.h>
71 #if __STDC__
72 #include <stdarg.h>
73 #else
74 #include <varargs.h>
75 #endif
76
77 #ifdef HAVE_OS_PROTO_H
78 #include "os-proto.h"
79 #endif
80
81 #include "pcap-int.h"
82
83 #include "gencode.h"
84
85 #ifndef ETHERTYPE_REVARP
86 #define ETHERTYPE_REVARP 0x8035
87 #endif
88 #ifndef ETHERTYPE_MOPDL
89 #define ETHERTYPE_MOPDL 0x6001
90 #endif
91 #ifndef ETHERTYPE_MOPRC
92 #define ETHERTYPE_MOPRC 0x6002
93 #endif
94 #ifndef ETHERTYPE_DN
95 #define ETHERTYPE_DN 0x6003
96 #endif
97 #ifndef ETHERTYPE_LAT
98 #define ETHERTYPE_LAT 0x6004
99 #endif
100
101 #define JMP(c) ((c)|BPF_JMP|BPF_K)
102
103 /* Locals */
104 static jmp_buf top_ctx;
105 static pcap_t *bpf_pcap;
106
107 /* XXX */
108 #ifdef PCAP_FDDIPAD
109 int pcap_fddipad = PCAP_FDDIPAD;
110 #else
111 int pcap_fddipad;
112 #endif
113 #ifndef DLT_ATM_RFC1483
114 #define DLT_ATM_RFC1483 11
115 #endif
116
117
118 /* VARARGS */
119 __dead void
120 #if __STDC__
121 bpf_error(const char *fmt, ...)
122 #else
123 bpf_error(fmt, va_alist)
124 const char *fmt;
125 va_dcl
126 #endif
127 {
128 va_list ap;
129
130 #if __STDC__
131 va_start(ap, fmt);
132 #else
133 va_start(ap);
134 #endif
135 if (bpf_pcap != NULL)
136 (void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap);
137 va_end(ap);
138 longjmp(top_ctx, 1);
139 /* NOTREACHED */
140 }
141
142 static void init_linktype(int);
143
144 static int alloc_reg(void);
145 static void free_reg(int);
146
147 static struct block *root;
148
149 /*
150  * We divvy out chunks of memory rather than call malloc each time so
151  * we don't have to worry about leaking memory.  It's probably
152  * not a big deal if all this memory was wasted, but if this ever
153  * goes into a library, that would probably not be a good idea.
154 */
155 #define NCHUNKS 16
156 #define CHUNK0SIZE 1024
157 struct chunk {
158 u_int n_left;
159 void *m;
160 };
161
162 static struct chunk chunks[NCHUNKS];
163 static int cur_chunk;
164
165 static void *newchunk(u_int);
166 static void freechunks(void);
167 static __inline struct block *new_block(int);
168 static __inline struct slist *new_stmt(int);
169 static struct block *gen_retblk(int);
170 static __inline void syntax(void);
171
172 static void backpatch(struct block *, struct block *);
173 static void merge(struct block *, struct block *);
174 static struct block *gen_cmp(u_int, u_int, bpf_int32);
175 static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
176 static struct block *gen_bcmp(u_int, u_int, u_char *);
177 static struct block *gen_uncond(int);
178 static __inline struct block *gen_true(void);
179 static __inline struct block *gen_false(void);
180 static struct block *gen_linktype(int);
181 static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
182 static struct block *gen_ehostop(u_char *, int);
183 static struct block *gen_fhostop(u_char *, int);
184 static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
185 static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
186 static struct block *gen_gateway(u_char *, bpf_u_int32 **, int, int);
187 static struct block *gen_ipfrag(void);
188 static struct block *gen_portatom(int, bpf_int32);
189 struct block *gen_portop(int, int, int);
190 static struct block *gen_port(int, int, int);
191 static int lookup_proto(char *, int);
192 static struct block *gen_proto(int, int, int);
193 static bpf_u_int32 net_mask(bpf_u_int32 *);
194 static struct slist *xfer_to_x(struct arth *);
195 static struct slist *xfer_to_a(struct arth *);
196 static struct block *gen_len(int, int);
197
198 static void *
199 newchunk(n)
200 u_int n;
201 {
202 struct chunk *cp;
203 int k, size;
204
205 /* XXX Round up to nearest long. */
206 n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);
207
208 cp = &chunks[cur_chunk];
209 if (n > cp->n_left) {
210 ++cp, k = ++cur_chunk;
211 if (k >= NCHUNKS)
212 bpf_error("out of memory");
213 size = CHUNK0SIZE << k;
214 cp->m = (void *)malloc(size);
215 memset((char *)cp->m, 0, size);
216 cp->n_left = size;
217 if (n > size)
218 bpf_error("out of memory");
219 }
220 cp->n_left -= n;
221 return (void *)((char *)cp->m + cp->n_left);
222 }
223
224 static void
225 freechunks()
226 {
227 int i;
228
229 cur_chunk = 0;
230 for (i = 0; i < NCHUNKS; ++i)
231 if (chunks[i].m != NULL) {
232 free(chunks[i].m);
233 chunks[i].m = NULL;
234 }
235 }
236
237 /*
238 * A strdup whose allocations are freed after code generation is over.
239 */
240 char *
241 sdup(s)
242 char *s;
243 {
244 int n = strlen(s) + 1;
245 char *cp = newchunk(n);
246 strcpy(cp, s);
247 return (cp);
248 }
249
250 static __inline struct block *
251 new_block(code)
252 int code;
253 {
254 struct block *p;
255
256 p = (struct block *)newchunk(sizeof(*p));
257 p->s.code = code;
258 p->head = p;
259
260 return p;
261 }
262
263 static __inline struct slist *
264 new_stmt(code)
265 int code;
266 {
267 struct slist *p;
268
269 p = (struct slist *)newchunk(sizeof(*p));
270 p->s.code = code;
271
272 return p;
273 }
274
275 static struct block *
276 gen_retblk(v)
277 int v;
278 {
279 struct block *b = new_block(BPF_RET|BPF_K);
280
281 b->s.k = v;
282 return b;
283 }
284
285 static __inline void
286 syntax()
287 {
288 bpf_error("syntax error in filter expression");
289 }
290
291 static bpf_u_int32 netmask;
292 static int snaplen;
293
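/*
 * Compile the filter expression in 'buf' into the BPF program 'program'.
 * A caller uses the public libpcap entry points declared in pcap.h; the
 * device name and filter string below are only illustrative:
 *
 *	char errbuf[PCAP_ERRBUF_SIZE];
 *	struct bpf_program fp;
 *	bpf_u_int32 net, mask;
 *	pcap_t *pd = pcap_open_live("en0", 68, 1, 1000, errbuf);
 *
 *	pcap_lookupnet("en0", &net, &mask, errbuf);
 *	if (pcap_compile(pd, &fp, "tcp port 80", 1, mask) < 0)
 *		errx(1, "%s", pcap_geterr(pd));
 *	pcap_setfilter(pd, &fp);
 */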
294 int
295 pcap_compile(pcap_t *p, struct bpf_program *program,
296 char *buf, int optimize, bpf_u_int32 mask)
297 {
298 extern int n_errors;
299 int len;
300
301 n_errors = 0;
302 root = NULL;
303 bpf_pcap = p;
304 if (setjmp(top_ctx)) {
305 freechunks();
306 return (-1);
307 }
308
309 netmask = mask;
310 snaplen = pcap_snapshot(p);
311
312 lex_init(buf ? buf : "");
313 init_linktype(pcap_datalink(p));
314 (void)pcap_parse();
315
316 if (n_errors)
317 syntax();
318
319 if (root == NULL)
320 root = gen_retblk(snaplen);
321
322 if (optimize) {
323 bpf_optimize(&root);
324 if (root == NULL ||
325 (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
326 bpf_error("expression rejects all packets");
327 }
328 program->bf_insns = icode_to_fcode(root, &len);
329 program->bf_len = len;
330
331 freechunks();
332 return (0);
333 }
334
335 /*
336 * Backpatch the blocks in 'list' to 'target'. The 'sense' field indicates
337 * which of the jt and jf fields has been resolved and which is a pointer
338 * back to another unresolved block (or nil). At least one of the fields
339 * in each block is already resolved.
340 */
341 static void
342 backpatch(list, target)
343 struct block *list, *target;
344 {
345 struct block *next;
346
347 while (list) {
348 if (!list->sense) {
349 next = JT(list);
350 JT(list) = target;
351 } else {
352 next = JF(list);
353 JF(list) = target;
354 }
355 list = next;
356 }
357 }
358
359 /*
360 * Merge the lists in b0 and b1, using the 'sense' field to indicate
361 * which of jt and jf is the link.
362 */
363 static void
364 merge(b0, b1)
365 struct block *b0, *b1;
366 {
367 register struct block **p = &b0;
368
369 /* Find end of list. */
370 while (*p)
371 p = !((*p)->sense) ? &JT(*p) : &JF(*p);
372
373 /* Concatenate the lists. */
374 *p = b1;
375 }
376
377 void
378 finish_parse(p)
379 struct block *p;
380 {
381 backpatch(p, gen_retblk(snaplen));
382 p->sense = !p->sense;
383 backpatch(p, gen_retblk(0));
384 root = p->head;
385 }
386
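/*
 * Combine two partially-built blocks.  gen_and() backpatches b0's pending
 * success exits to point at b1, so the combination matches only if both
 * tests do; gen_or() backpatches b0's pending failure exits to b1, so it
 * matches if either test does.  In both cases the remaining unresolved
 * exits of b0 and b1 are merged into one list, and b1 (with b1->head reset
 * to b0->head) is what the caller continues to use as the combined block.
 */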
387 void
388 gen_and(b0, b1)
389 struct block *b0, *b1;
390 {
391 backpatch(b0, b1->head);
392 b0->sense = !b0->sense;
393 b1->sense = !b1->sense;
394 merge(b1, b0);
395 b1->sense = !b1->sense;
396 b1->head = b0->head;
397 }
398
399 void
400 gen_or(b0, b1)
401 struct block *b0, *b1;
402 {
403 b0->sense = !b0->sense;
404 backpatch(b0, b1->head);
405 b0->sense = !b0->sense;
406 merge(b1, b0);
407 b1->head = b0->head;
408 }
409
410 void
411 gen_not(b)
412 struct block *b;
413 {
414 b->sense = !b->sense;
415 }
416
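/*
 * Generate a block that tests whether the 'size'-wide field at absolute
 * offset 'offset' equals 'v'; in bpf(4) assembler terms, roughly:
 *
 *	ld{b,h,w} [offset]
 *	jeq #v, Ltrue, Lfalse
 */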
417 static struct block *
418 gen_cmp(offset, size, v)
419 u_int offset, size;
420 bpf_int32 v;
421 {
422 struct slist *s;
423 struct block *b;
424
425 s = new_stmt(BPF_LD|BPF_ABS|size);
426 s->s.k = offset;
427
428 b = new_block(JMP(BPF_JEQ));
429 b->stmts = s;
430 b->s.k = v;
431
432 return b;
433 }
434
435 static struct block *
436 gen_mcmp(offset, size, v, mask)
437 u_int offset, size;
438 bpf_int32 v;
439 bpf_u_int32 mask;
440 {
441 struct block *b = gen_cmp(offset, size, v);
442 struct slist *s;
443
444 if (mask != 0xffffffff) {
445 s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
446 s->s.k = mask;
447 b->stmts->next = s;
448 }
449 return b;
450 }
451
452 static struct block *
453 gen_bcmp(offset, size, v)
454 u_int offset, size;
455 u_char *v;
456 {
457 struct block *b, *tmp;
458
459 b = NULL;
460 while (size >= 4) {
461 u_char *p = &v[size - 4];
462 bpf_int32 w = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
463 tmp = gen_cmp(offset + size - 4, BPF_W, w);
464 if (b != NULL)
465 gen_and(b, tmp);
466 b = tmp;
467 size -= 4;
468 }
469 while (size >= 2) {
470 u_char *p = &v[size - 2];
471 bpf_int32 w = (p[0] << 8) | p[1];
472 tmp = gen_cmp(offset + size - 2, BPF_H, w);
473 if (b != NULL)
474 gen_and(b, tmp);
475 b = tmp;
476 size -= 2;
477 }
478 if (size > 0) {
479 tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
480 if (b != NULL)
481 gen_and(b, tmp);
482 b = tmp;
483 }
484 return b;
485 }
486
487 /*
488 * Various code constructs need to know the layout of the data link
489 * layer. These variables give the necessary offsets. off_linktype
490 * is set to -1 for no encapsulation, in which case, IP is assumed.
491 */
492 static u_int off_linktype;
493 static u_int off_nl;
494 static int linktype;
495
496 static void
497 init_linktype(type)
498 int type;
499 {
500 linktype = type;
501
502 switch (type) {
503
504 case DLT_EN10MB:
505 off_linktype = 12;
506 off_nl = 14;
507 return;
508
509 case DLT_SLIP:
510 /*
511  * SLIP doesn't have a link-level type.  The 16-byte
512 * header is hacked into our SLIP driver.
513 */
514 off_linktype = -1;
515 off_nl = 16;
516 return;
517
518 case DLT_NULL:
519 off_linktype = -1;
520 off_nl = 0;
521 return;
522
523 case DLT_PPP:
524 off_linktype = 2;
525 off_nl = 4;
526 return;
527
528 case DLT_FDDI:
529 /*
530 * FDDI doesn't really have a link-level type field.
531 * We assume that SSAP = SNAP is being used and pick
532 * out the encapsulated Ethernet type.
533 */
534 off_linktype = 19;
535 #ifdef PCAP_FDDIPAD
536 off_linktype += pcap_fddipad;
537 #endif
538 off_nl = 21;
539 #ifdef PCAP_FDDIPAD
540 off_nl += pcap_fddipad;
541 #endif
542 return;
543
544 case DLT_IEEE802:
545 off_linktype = 20;
546 off_nl = 22;
547 return;
548
549 case DLT_ATM_RFC1483:
550 /*
551 * assume routed, non-ISO PDUs
552  * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
553 */
554 off_linktype = 6;
555 off_nl = 8;
556 return;
557 }
558 bpf_error("unknown data link type 0x%x", linktype);
559 /* NOTREACHED */
560 }
561
562 static struct block *
563 gen_uncond(rsense)
564 int rsense;
565 {
566 struct block *b;
567 struct slist *s;
568
569 s = new_stmt(BPF_LD|BPF_IMM);
570 s->s.k = !rsense;
571 b = new_block(JMP(BPF_JEQ));
572 b->stmts = s;
573
574 return b;
575 }
576
577 static __inline struct block *
578 gen_true()
579 {
580 return gen_uncond(1);
581 }
582
583 static __inline struct block *
584 gen_false()
585 {
586 return gen_uncond(0);
587 }
588
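/*
 * Generate a test on the link-level type field for 'proto' (an ETHERTYPE_*
 * value).  Link layers without a real type field are special-cased: SLIP
 * only ever carries IP, DLT_NULL carries an AF_* value in its first word,
 * and PPP uses its own protocol numbers (hence the 0x0021 for IP).
 */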
589 static struct block *
590 gen_linktype(proto)
591 int proto;
592 {
593 switch (linktype) {
594 case DLT_SLIP:
595 if (proto == ETHERTYPE_IP)
596 return gen_true();
597 else
598 return gen_false();
599
600 case DLT_PPP:
601 if (proto == ETHERTYPE_IP)
602 proto = 0x0021; /* XXX - need ppp.h defs */
603 break;
604
605 case DLT_NULL:
606 /* XXX */
607 if (proto == ETHERTYPE_IP)
608 return (gen_cmp(0, BPF_W, (bpf_int32)AF_INET));
609 else
610 return gen_false();
611 }
612 return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
613 }
614
615 static struct block *
616 gen_hostop(addr, mask, dir, proto, src_off, dst_off)
617 bpf_u_int32 addr;
618 bpf_u_int32 mask;
619 int dir, proto;
620 u_int src_off, dst_off;
621 {
622 struct block *b0, *b1;
623 u_int offset;
624
625 switch (dir) {
626
627 case Q_SRC:
628 offset = src_off;
629 break;
630
631 case Q_DST:
632 offset = dst_off;
633 break;
634
635 case Q_AND:
636 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
637 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
638 gen_and(b0, b1);
639 return b1;
640
641 case Q_OR:
642 case Q_DEFAULT:
643 b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
644 b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
645 gen_or(b0, b1);
646 return b1;
647
648 default:
649 abort();
650 }
651 b0 = gen_linktype(proto);
652 b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
653 gen_and(b0, b1);
654 return b1;
655 }
656
657 static struct block *
658 gen_ehostop(eaddr, dir)
659 u_char *eaddr;
660 int dir;
661 {
662 struct block *b0, *b1;
663
664 switch (dir) {
665 case Q_SRC:
666 return gen_bcmp(6, 6, eaddr);
667
668 case Q_DST:
669 return gen_bcmp(0, 6, eaddr);
670
671 case Q_AND:
672 b0 = gen_ehostop(eaddr, Q_SRC);
673 b1 = gen_ehostop(eaddr, Q_DST);
674 gen_and(b0, b1);
675 return b1;
676
677 case Q_DEFAULT:
678 case Q_OR:
679 b0 = gen_ehostop(eaddr, Q_SRC);
680 b1 = gen_ehostop(eaddr, Q_DST);
681 gen_or(b0, b1);
682 return b1;
683 }
684 abort();
685 /* NOTREACHED */
686 }
687
688 /*
689 * Like gen_ehostop, but for DLT_FDDI
690 */
691 static struct block *
692 gen_fhostop(eaddr, dir)
693 u_char *eaddr;
694 int dir;
695 {
696 struct block *b0, *b1;
697
698 switch (dir) {
699 case Q_SRC:
700 #ifdef PCAP_FDDIPAD
701 return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
702 #else
703 return gen_bcmp(6 + 1, 6, eaddr);
704 #endif
705
706 case Q_DST:
707 #ifdef PCAP_FDDIPAD
708 return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
709 #else
710 return gen_bcmp(0 + 1, 6, eaddr);
711 #endif
712
713 case Q_AND:
714 b0 = gen_fhostop(eaddr, Q_SRC);
715 b1 = gen_fhostop(eaddr, Q_DST);
716 gen_and(b0, b1);
717 return b1;
718
719 case Q_DEFAULT:
720 case Q_OR:
721 b0 = gen_fhostop(eaddr, Q_SRC);
722 b1 = gen_fhostop(eaddr, Q_DST);
723 gen_or(b0, b1);
724 return b1;
725 }
726 abort();
727 /* NOTREACHED */
728 }
729
730 /*
731 * This is quite tricky because there may be pad bytes in front of the
732 * DECNET header, and then there are two possible data packet formats that
733 * carry both src and dst addresses, plus 5 packet types in a format that
734 * carries only the src node, plus 2 types that use a different format and
735 * also carry just the src node.
736 *
737 * Yuck.
738 *
739  * Instead of handling all of those correctly, we just look for data packets with
740 * 0 or 1 bytes of padding. If you want to look at other packets, that
741 * will require a lot more hacking.
742 *
743 * To add support for filtering on DECNET "areas" (network numbers)
744 * one would want to add a "mask" argument to this routine. That would
745 * make the filter even more inefficient, although one could be clever
746 * and not generate masking instructions if the mask is 0xFFFF.
747 */
748 static struct block *
749 gen_dnhostop(addr, dir, base_off)
750 bpf_u_int32 addr;
751 int dir;
752 u_int base_off;
753 {
754 struct block *b0, *b1, *b2, *tmp;
755 u_int offset_lh; /* offset if long header is received */
756 u_int offset_sh; /* offset if short header is received */
757
758 switch (dir) {
759
760 case Q_DST:
761 offset_sh = 1; /* follows flags */
762 offset_lh = 7; /* flgs,darea,dsubarea,HIORD */
763 break;
764
765 case Q_SRC:
766 offset_sh = 3; /* follows flags, dstnode */
767 offset_lh = 15; /* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
768 break;
769
770 case Q_AND:
771 /* Inefficient because we do our Calvinball dance twice */
772 b0 = gen_dnhostop(addr, Q_SRC, base_off);
773 b1 = gen_dnhostop(addr, Q_DST, base_off);
774 gen_and(b0, b1);
775 return b1;
776
777 case Q_OR:
778 case Q_DEFAULT:
779 /* Inefficient because we do our Calvinball dance twice */
780 b0 = gen_dnhostop(addr, Q_SRC, base_off);
781 b1 = gen_dnhostop(addr, Q_DST, base_off);
782 gen_or(b0, b1);
783 return b1;
784
785 default:
786 abort();
787 }
788 b0 = gen_linktype(ETHERTYPE_DN);
789 /* Check for pad = 1, long header case */
790 tmp = gen_mcmp(base_off + 2, BPF_H,
791 (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
792 b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
793 BPF_H, (bpf_int32)ntohs(addr));
794 gen_and(tmp, b1);
795 /* Check for pad = 0, long header case */
796 tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
797 b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
798 gen_and(tmp, b2);
799 gen_or(b2, b1);
800 /* Check for pad = 1, short header case */
801 tmp = gen_mcmp(base_off + 2, BPF_H,
802 (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
803 b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
804 BPF_H, (bpf_int32)ntohs(addr));
805 gen_and(tmp, b2);
806 gen_or(b2, b1);
807 /* Check for pad = 0, short header case */
808 tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
809 b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
810 gen_and(tmp, b2);
811 gen_or(b2, b1);
812
813 /* Combine with test for linktype */
814 gen_and(b0, b1);
815 return b1;
816 }
817
818 static struct block *
819 gen_host(addr, mask, proto, dir)
820 bpf_u_int32 addr;
821 bpf_u_int32 mask;
822 int proto;
823 int dir;
824 {
825 struct block *b0, *b1;
826
827 switch (proto) {
828
829 case Q_DEFAULT:
830 b0 = gen_host(addr, mask, Q_IP, dir);
831 b1 = gen_host(addr, mask, Q_ARP, dir);
832 gen_or(b0, b1);
833 b0 = gen_host(addr, mask, Q_RARP, dir);
834 gen_or(b1, b0);
835 return b0;
836
837 case Q_IP:
838 return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
839 off_nl + 12, off_nl + 16);
840
841 case Q_RARP:
842 return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
843 off_nl + 14, off_nl + 24);
844
845 case Q_ARP:
846 return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
847 off_nl + 14, off_nl + 24);
848
849 case Q_TCP:
850 bpf_error("'tcp' modifier applied to host");
851
852 case Q_UDP:
853 bpf_error("'udp' modifier applied to host");
854
855 case Q_ICMP:
856 bpf_error("'icmp' modifier applied to host");
857
858 case Q_IGMP:
859 bpf_error("'igmp' modifier applied to host");
860
861 case Q_DECNET:
862 return gen_dnhostop(addr, dir, off_nl);
863
864 case Q_LAT:
865 bpf_error("LAT host filtering not implemented");
866
867 case Q_MOPDL:
868 bpf_error("MOPDL host filtering not implemented");
869
870 case Q_MOPRC:
871 bpf_error("MOPRC host filtering not implemented");
872
873 default:
874 abort();
875 }
876 /* NOTREACHED */
877 }
878
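/*
 * Generate code for the 'gateway' qualifier: the packet's link-level source
 * or destination address is the gateway's, but neither IP address is one of
 * the gateway's own, i.e. the packet is being forwarded through that host.
 * This only works on ethernet and FDDI, where link-level addresses are
 * visible.
 */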
879 static struct block *
880 gen_gateway(eaddr, alist, proto, dir)
881 u_char *eaddr;
882 bpf_u_int32 **alist;
883 int proto;
884 int dir;
885 {
886 struct block *b0, *b1, *tmp;
887
888 if (dir != 0)
889 bpf_error("direction applied to 'gateway'");
890
891 switch (proto) {
892 case Q_DEFAULT:
893 case Q_IP:
894 case Q_ARP:
895 case Q_RARP:
896 if (linktype == DLT_EN10MB)
897 b0 = gen_ehostop(eaddr, Q_OR);
898 else if (linktype == DLT_FDDI)
899 b0 = gen_fhostop(eaddr, Q_OR);
900 else
901 bpf_error(
902 "'gateway' supported only on ethernet or FDDI");
903
904 b1 = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
905 while (*alist) {
906 tmp = gen_host(**alist++, 0xffffffffL, proto, Q_OR);
907 gen_or(b1, tmp);
908 b1 = tmp;
909 }
910 gen_not(b1);
911 gen_and(b0, b1);
912 return b1;
913 }
914 bpf_error("illegal modifier of 'gateway'");
915 /* NOTREACHED */
916 }
917
918 struct block *
919 gen_proto_abbrev(proto)
920 int proto;
921 {
922 struct block *b0, *b1;
923
924 switch (proto) {
925
926 case Q_TCP:
927 b0 = gen_linktype(ETHERTYPE_IP);
928 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
929 gen_and(b0, b1);
930 break;
931
932 case Q_UDP:
933 b0 = gen_linktype(ETHERTYPE_IP);
934 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
935 gen_and(b0, b1);
936 break;
937
938 case Q_ICMP:
939 b0 = gen_linktype(ETHERTYPE_IP);
940 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
941 gen_and(b0, b1);
942 break;
943
944 case Q_IGMP:
945 b0 = gen_linktype(ETHERTYPE_IP);
946 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
947 gen_and(b0, b1);
948 break;
949
950 case Q_IP:
951 b1 = gen_linktype(ETHERTYPE_IP);
952 break;
953
954 case Q_ARP:
955 b1 = gen_linktype(ETHERTYPE_ARP);
956 break;
957
958 case Q_RARP:
959 b1 = gen_linktype(ETHERTYPE_REVARP);
960 break;
961
962 case Q_LINK:
963 bpf_error("link layer applied in wrong context");
964
965 case Q_DECNET:
966 b1 = gen_linktype(ETHERTYPE_DN);
967 break;
968
969 case Q_LAT:
970 b1 = gen_linktype(ETHERTYPE_LAT);
971 break;
972
973 case Q_MOPDL:
974 b1 = gen_linktype(ETHERTYPE_MOPDL);
975 break;
976
977 case Q_MOPRC:
978 b1 = gen_linktype(ETHERTYPE_MOPRC);
979 break;
980
981 default:
982 abort();
983 }
984 return b1;
985 }
986
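/*
 * Generate a test that the packet is not a later IP fragment, i.e. that the
 * 13-bit fragment offset in the halfword at off_nl + 6 is zero and the
 * TCP/UDP header is therefore present in this packet.
 */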
987 static struct block *
988 gen_ipfrag()
989 {
990 struct slist *s;
991 struct block *b;
992
993 /* not ip frag */
994 s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
995 s->s.k = off_nl + 6;
996 b = new_block(JMP(BPF_JSET));
997 b->s.k = 0x1fff;
998 b->stmts = s;
999 gen_not(b);
1000
1001 return b;
1002 }
1003
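/*
 * Generate a test on a TCP/UDP port; 'off' is 0 for the source port and 2
 * for the destination port.  The BPF_MSH instruction loads 4 * (IP header
 * length) into the X register, so the generated code is roughly:
 *
 *	ldxb 4*([off_nl]&0xf)		; X = IP header length
 *	ldh [x + off_nl + off]		; port field in the TCP/UDP header
 *	jeq #v, Ltrue, Lfalse
 *
 * This assumes a fixed-length link header (off_nl), as elsewhere.
 */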
1004 static struct block *
1005 gen_portatom(off, v)
1006 int off;
1007 bpf_int32 v;
1008 {
1009 struct slist *s;
1010 struct block *b;
1011
1012 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1013 s->s.k = off_nl;
1014
1015 s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
1016 s->next->s.k = off_nl + off;
1017
1018 b = new_block(JMP(BPF_JEQ));
1019 b->stmts = s;
1020 b->s.k = v;
1021
1022 return b;
1023 }
1024
1025 struct block *
1026 gen_portop(port, proto, dir)
1027 int port, proto, dir;
1028 {
1029 struct block *b0, *b1, *tmp;
1030
1031 /* ip proto 'proto' */
1032 tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
1033 b0 = gen_ipfrag();
1034 gen_and(tmp, b0);
1035
1036 switch (dir) {
1037 case Q_SRC:
1038 b1 = gen_portatom(0, (bpf_int32)port);
1039 break;
1040
1041 case Q_DST:
1042 b1 = gen_portatom(2, (bpf_int32)port);
1043 break;
1044
1045 case Q_OR:
1046 case Q_DEFAULT:
1047 tmp = gen_portatom(0, (bpf_int32)port);
1048 b1 = gen_portatom(2, (bpf_int32)port);
1049 gen_or(tmp, b1);
1050 break;
1051
1052 case Q_AND:
1053 tmp = gen_portatom(0, (bpf_int32)port);
1054 b1 = gen_portatom(2, (bpf_int32)port);
1055 gen_and(tmp, b1);
1056 break;
1057
1058 default:
1059 abort();
1060 }
1061 gen_and(b0, b1);
1062
1063 return b1;
1064 }
1065
1066 static struct block *
1067 gen_port(port, ip_proto, dir)
1068 int port;
1069 int ip_proto;
1070 int dir;
1071 {
1072 struct block *b0, *b1, *tmp;
1073
1074 /* ether proto ip */
1075 b0 = gen_linktype(ETHERTYPE_IP);
1076
1077 switch (ip_proto) {
1078 case IPPROTO_UDP:
1079 case IPPROTO_TCP:
1080 b1 = gen_portop(port, ip_proto, dir);
1081 break;
1082
1083 case PROTO_UNDEF:
1084 tmp = gen_portop(port, IPPROTO_TCP, dir);
1085 b1 = gen_portop(port, IPPROTO_UDP, dir);
1086 gen_or(tmp, b1);
1087 break;
1088
1089 default:
1090 abort();
1091 }
1092 gen_and(b0, b1);
1093 return b1;
1094 }
1095
1096 static int
1097 lookup_proto(name, proto)
1098 char *name;
1099 int proto;
1100 {
1101 int v;
1102
1103 switch (proto) {
1104 case Q_DEFAULT:
1105 case Q_IP:
1106 v = pcap_nametoproto(name);
1107 if (v == PROTO_UNDEF)
1108 bpf_error("unknown ip proto '%s'", name);
1109 break;
1110
1111 case Q_LINK:
1112 /* XXX should look up h/w protocol type based on linktype */
1113 v = pcap_nametoeproto(name);
1114 if (v == PROTO_UNDEF)
1115 bpf_error("unknown ether proto '%s'", name);
1116 break;
1117
1118 default:
1119 v = PROTO_UNDEF;
1120 break;
1121 }
1122 return v;
1123 }
1124
1125 static struct block *
1126 gen_proto(v, proto, dir)
1127 int v;
1128 int proto;
1129 int dir;
1130 {
1131 struct block *b0, *b1;
1132
1133 if (dir != Q_DEFAULT)
1134 bpf_error("direction applied to 'proto'");
1135
1136 switch (proto) {
1137 case Q_DEFAULT:
1138 case Q_IP:
1139 b0 = gen_linktype(ETHERTYPE_IP);
1140 b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
1141 gen_and(b0, b1);
1142 return b1;
1143
1144 case Q_ARP:
1145 bpf_error("arp does not encapsulate another protocol");
1146 /* NOTREACHED */
1147
1148 case Q_RARP:
1149 bpf_error("rarp does not encapsulate another protocol");
1150 /* NOTREACHED */
1151
1152 case Q_DECNET:
1153 bpf_error("decnet encapsulation is not specifiable");
1154 /* NOTREACHED */
1155
1156 case Q_LAT:
1157 bpf_error("lat does not encapsulate another protocol");
1158 /* NOTREACHED */
1159
1160 case Q_MOPRC:
1161 bpf_error("moprc does not encapsulate another protocol");
1162 /* NOTREACHED */
1163
1164 case Q_MOPDL:
1165 bpf_error("mopdl does not encapsulate another protocol");
1166 /* NOTREACHED */
1167
1168 case Q_LINK:
1169 return gen_linktype(v);
1170
1171 case Q_UDP:
1172 bpf_error("'udp proto' is bogus");
1173 /* NOTREACHED */
1174
1175 case Q_TCP:
1176 bpf_error("'tcp proto' is bogus");
1177 /* NOTREACHED */
1178
1179 case Q_ICMP:
1180 bpf_error("'icmp proto' is bogus");
1181 /* NOTREACHED */
1182
1183 case Q_IGMP:
1184 bpf_error("'igmp proto' is bogus");
1185 /* NOTREACHED */
1186
1187 default:
1188 abort();
1189 /* NOTREACHED */
1190 }
1191 /* NOTREACHED */
1192 }
1193
1194 /*
1195 * Left justify 'addr' and return its resulting network mask.
1196 */
1197 static bpf_u_int32
1198 net_mask(addr)
1199 bpf_u_int32 *addr;
1200 {
1201 register bpf_u_int32 m = 0xffffffff;
1202
1203 if (*addr)
1204 while ((*addr & 0xff000000) == 0)
1205 *addr <<= 8, m <<= 8;
1206
1207 return m;
1208 }
1209
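/*
 * Generate code for a primitive whose operand is a name rather than a
 * number, e.g. 'host foo.example.com', 'port domain' or 'ip proto ospf'
 * (the examples are illustrative).  The name is resolved here, via the
 * pcap_nameto*() routines, according to the address kind and qualifiers
 * recorded in 'q'.
 */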
1210 struct block *
1211 gen_scode(name, q)
1212 char *name;
1213 struct qual q;
1214 {
1215 int proto = q.proto;
1216 int dir = q.dir;
1217 u_char *eaddr;
1218 bpf_u_int32 mask, addr, **alist;
1219 struct block *b, *tmp;
1220 int port, real_proto;
1221
1222 switch (q.addr) {
1223
1224 case Q_NET:
1225 addr = pcap_nametonetaddr(name);
1226 if (addr == 0)
1227 bpf_error("unknown network '%s'", name);
1228 mask = net_mask(&addr);
1229 return gen_host(addr, mask, proto, dir);
1230
1231 case Q_DEFAULT:
1232 case Q_HOST:
1233 if (proto == Q_LINK) {
1234 switch (linktype) {
1235
1236 case DLT_EN10MB:
1237 eaddr = pcap_ether_hostton(name);
1238 if (eaddr == NULL)
1239 bpf_error(
1240 "unknown ether host '%s'", name);
1241 return gen_ehostop(eaddr, dir);
1242
1243 case DLT_FDDI:
1244 eaddr = pcap_ether_hostton(name);
1245 if (eaddr == NULL)
1246 bpf_error(
1247 "unknown FDDI host '%s'", name);
1248 return gen_fhostop(eaddr, dir);
1249
1250 default:
1251 bpf_error(
1252 "only ethernet/FDDI supports link-level host name");
1253 break;
1254 }
1255 } else if (proto == Q_DECNET) {
1256 unsigned short dn_addr = __pcap_nametodnaddr(name);
1257 /*
1258 * I don't think DECNET hosts can be multihomed, so
1259 * there is no need to build up a list of addresses
1260 */
1261 return (gen_host(dn_addr, 0, proto, dir));
1262 } else {
1263 alist = pcap_nametoaddr(name);
1264 if (alist == NULL || *alist == NULL)
1265 bpf_error("unknown host '%s'", name);
1266 b = gen_host(**alist++, 0xffffffffL, proto, dir);
1267 while (*alist) {
1268 tmp = gen_host(**alist++, 0xffffffffL,
1269 proto, dir);
1270 gen_or(b, tmp);
1271 b = tmp;
1272 }
1273 return b;
1274 }
1275
1276 case Q_PORT:
1277 if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
1278 bpf_error("illegal qualifier of 'port'");
1279 if (pcap_nametoport(name, &port, &real_proto) == 0)
1280 bpf_error("unknown port '%s'", name);
1281 if (proto == Q_UDP) {
1282 if (real_proto == IPPROTO_TCP)
1283 bpf_error("port '%s' is tcp", name);
1284 else
1285 /* override PROTO_UNDEF */
1286 real_proto = IPPROTO_UDP;
1287 }
1288 if (proto == Q_TCP) {
1289 if (real_proto == IPPROTO_UDP)
1290 bpf_error("port '%s' is udp", name);
1291 else
1292 /* override PROTO_UNDEF */
1293 real_proto = IPPROTO_TCP;
1294 }
1295 return gen_port(port, real_proto, dir);
1296
1297 case Q_GATEWAY:
1298 eaddr = pcap_ether_hostton(name);
1299 if (eaddr == NULL)
1300 bpf_error("unknown ether host: %s", name);
1301
1302 alist = pcap_nametoaddr(name);
1303 if (alist == NULL || *alist == NULL)
1304 bpf_error("unknown host '%s'", name);
1305 return gen_gateway(eaddr, alist, proto, dir);
1306
1307 case Q_PROTO:
1308 real_proto = lookup_proto(name, proto);
1309 if (real_proto >= 0)
1310 return gen_proto(real_proto, proto, dir);
1311 else
1312 bpf_error("unknown protocol: %s", name);
1313
1314 case Q_UNDEF:
1315 syntax();
1316 /* NOTREACHED */
1317 }
1318 abort();
1319 /* NOTREACHED */
1320 }
1321
1322 struct block *
1323 gen_ncode(v, q)
1324 bpf_u_int32 v;
1325 struct qual q;
1326 {
1327 bpf_u_int32 mask;
1328 int proto = q.proto;
1329 int dir = q.dir;
1330
1331 switch (q.addr) {
1332
1333 case Q_DEFAULT:
1334 case Q_HOST:
1335 case Q_NET:
1336 if (proto == Q_DECNET)
1337 return gen_host(v, 0, proto, dir);
1338 else if (proto == Q_LINK) {
1339 bpf_error("illegal link layer address");
1340 } else {
1341 mask = net_mask(&v);
1342 return gen_host(v, mask, proto, dir);
1343 }
1344
1345 case Q_PORT:
1346 if (proto == Q_UDP)
1347 proto = IPPROTO_UDP;
1348 else if (proto == Q_TCP)
1349 proto = IPPROTO_TCP;
1350 else if (proto == Q_DEFAULT)
1351 proto = PROTO_UNDEF;
1352 else
1353 bpf_error("illegal qualifier of 'port'");
1354
1355 return gen_port((int)v, proto, dir);
1356
1357 case Q_GATEWAY:
1358 bpf_error("'gateway' requires a name");
1359 /* NOTREACHED */
1360
1361 case Q_PROTO:
1362 return gen_proto((int)v, proto, dir);
1363
1364 case Q_UNDEF:
1365 syntax();
1366 /* NOTREACHED */
1367
1368 default:
1369 abort();
1370 /* NOTREACHED */
1371 }
1372 /* NOTREACHED */
1373 }
1374
1375 struct block *
1376 gen_ecode(eaddr, q)
1377 u_char *eaddr;
1378 struct qual q;
1379 {
1380 if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
1381 if (linktype == DLT_EN10MB)
1382 return gen_ehostop(eaddr, (int)q.dir);
1383 if (linktype == DLT_FDDI)
1384 return gen_fhostop(eaddr, (int)q.dir);
1385 }
1386 bpf_error("ethernet address used in non-ether expression");
1387 /* NOTREACHED */
1388 }
1389
1390 void
1391 sappend(s0, s1)
1392 struct slist *s0, *s1;
1393 {
1394 /*
1395 * This is definitely not the best way to do this, but the
1396 * lists will rarely get long.
1397 */
1398 while (s0->next)
1399 s0 = s0->next;
1400 s0->next = s1;
1401 }
1402
1403 static struct slist *
1404 xfer_to_x(a)
1405 struct arth *a;
1406 {
1407 struct slist *s;
1408
1409 s = new_stmt(BPF_LDX|BPF_MEM);
1410 s->s.k = a->regno;
1411 return s;
1412 }
1413
1414 static struct slist *
1415 xfer_to_a(a)
1416 struct arth *a;
1417 {
1418 struct slist *s;
1419
1420 s = new_stmt(BPF_LD|BPF_MEM);
1421 s->s.k = a->regno;
1422 return s;
1423 }
1424
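/*
 * Generate code for an indexed load, i.e. a filter expression of the form
 * proto[index:size], e.g. 'ip[2:2]' or 'tcp[13]'.  The loaded value is
 * stored in a scratch register so gen_relation() and gen_arth() can use
 * it; for tcp/udp/icmp/igmp the index is taken relative to the start of
 * the transport header by adding in the variable IP header length.
 */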
1425 struct arth *
1426 gen_load(proto, index, size)
1427 int proto;
1428 struct arth *index;
1429 int size;
1430 {
1431 struct slist *s, *tmp;
1432 struct block *b;
1433 int regno = alloc_reg();
1434
1435 free_reg(index->regno);
1436 switch (size) {
1437
1438 default:
1439 bpf_error("data size must be 1, 2, or 4");
1440
1441 case 1:
1442 size = BPF_B;
1443 break;
1444
1445 case 2:
1446 size = BPF_H;
1447 break;
1448
1449 case 4:
1450 size = BPF_W;
1451 break;
1452 }
1453 switch (proto) {
1454 default:
1455 bpf_error("unsupported index operation");
1456
1457 case Q_LINK:
1458 s = xfer_to_x(index);
1459 tmp = new_stmt(BPF_LD|BPF_IND|size);
1460 sappend(s, tmp);
1461 sappend(index->s, s);
1462 break;
1463
1464 case Q_IP:
1465 case Q_ARP:
1466 case Q_RARP:
1467 case Q_DECNET:
1468 case Q_LAT:
1469 case Q_MOPRC:
1470 case Q_MOPDL:
1471 /* XXX Note that we assume a fixed link header here. */
1472 s = xfer_to_x(index);
1473 tmp = new_stmt(BPF_LD|BPF_IND|size);
1474 tmp->s.k = off_nl;
1475 sappend(s, tmp);
1476 sappend(index->s, s);
1477
1478 b = gen_proto_abbrev(proto);
1479 if (index->b)
1480 gen_and(index->b, b);
1481 index->b = b;
1482 break;
1483
1484 case Q_TCP:
1485 case Q_UDP:
1486 case Q_ICMP:
1487 case Q_IGMP:
1488 s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1489 s->s.k = off_nl;
1490 sappend(s, xfer_to_a(index));
1491 sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
1492 sappend(s, new_stmt(BPF_MISC|BPF_TAX));
1493 sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
1494 tmp->s.k = off_nl;
1495 sappend(index->s, s);
1496
1497 gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
1498 if (index->b)
1499 gen_and(index->b, b);
1500 index->b = b;
1501 break;
1502 }
1503 index->regno = regno;
1504 s = new_stmt(BPF_ST);
1505 s->s.k = regno;
1506 sappend(index->s, s);
1507
1508 return index;
1509 }
1510
1511 struct block *
1512 gen_relation(code, a0, a1, reversed)
1513 int code;
1514 struct arth *a0, *a1;
1515 int reversed;
1516 {
1517 struct slist *s0, *s1, *s2;
1518 struct block *b, *tmp;
1519
1520 s0 = xfer_to_x(a1);
1521 s1 = xfer_to_a(a0);
1522 s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
1523 b = new_block(JMP(code));
1524 if (code == BPF_JGT || code == BPF_JGE) {
1525 reversed = !reversed;
1526 b->s.k = 0x80000000;
1527 }
1528 if (reversed)
1529 gen_not(b);
1530
1531 sappend(s1, s2);
1532 sappend(s0, s1);
1533 sappend(a1->s, s0);
1534 sappend(a0->s, a1->s);
1535
1536 b->stmts = a0->s;
1537
1538 free_reg(a0->regno);
1539 free_reg(a1->regno);
1540
1541 /* 'and' together protocol checks */
1542 if (a0->b) {
1543 if (a1->b) {
1544 gen_and(a0->b, tmp = a1->b);
1545 }
1546 else
1547 tmp = a0->b;
1548 } else
1549 tmp = a1->b;
1550
1551 if (tmp)
1552 gen_and(tmp, b);
1553
1554 return b;
1555 }
1556
1557 struct arth *
1558 gen_loadlen()
1559 {
1560 int regno = alloc_reg();
1561 struct arth *a = (struct arth *)newchunk(sizeof(*a));
1562 struct slist *s;
1563
1564 s = new_stmt(BPF_LD|BPF_LEN);
1565 s->next = new_stmt(BPF_ST);
1566 s->next->s.k = regno;
1567 a->s = s;
1568 a->regno = regno;
1569
1570 return a;
1571 }
1572
1573 struct arth *
1574 gen_loadi(val)
1575 int val;
1576 {
1577 struct arth *a;
1578 struct slist *s;
1579 int reg;
1580
1581 a = (struct arth *)newchunk(sizeof(*a));
1582
1583 reg = alloc_reg();
1584
1585 s = new_stmt(BPF_LD|BPF_IMM);
1586 s->s.k = val;
1587 s->next = new_stmt(BPF_ST);
1588 s->next->s.k = reg;
1589 a->s = s;
1590 a->regno = reg;
1591
1592 return a;
1593 }
1594
1595 struct arth *
1596 gen_neg(a)
1597 struct arth *a;
1598 {
1599 struct slist *s;
1600
1601 s = xfer_to_a(a);
1602 sappend(a->s, s);
1603 s = new_stmt(BPF_ALU|BPF_NEG);
1604 s->s.k = 0;
1605 sappend(a->s, s);
1606 s = new_stmt(BPF_ST);
1607 s->s.k = a->regno;
1608 sappend(a->s, s);
1609
1610 return a;
1611 }
1612
1613 struct arth *
1614 gen_arth(code, a0, a1)
1615 int code;
1616 struct arth *a0, *a1;
1617 {
1618 struct slist *s0, *s1, *s2;
1619
1620 s0 = xfer_to_x(a1);
1621 s1 = xfer_to_a(a0);
1622 s2 = new_stmt(BPF_ALU|BPF_X|code);
1623
1624 sappend(s1, s2);
1625 sappend(s0, s1);
1626 sappend(a1->s, s0);
1627 sappend(a0->s, a1->s);
1628
1629 free_reg(a1->regno);
1630
1631 s0 = new_stmt(BPF_ST);
1632 a0->regno = s0->s.k = alloc_reg();
1633 sappend(a0->s, s0);
1634
1635 return a0;
1636 }
1637
1638 /*
1639 * Here we handle simple allocation of the scratch registers.
1640 * If too many registers are alloc'd, the allocator punts.
1641 */
1642 static int regused[BPF_MEMWORDS];
1643 static int curreg;
1644
1645 /*
1646 * Return the next free register.
1647 */
1648 static int
1649 alloc_reg()
1650 {
1651 int n = BPF_MEMWORDS;
1652
1653 while (--n >= 0) {
1654 if (regused[curreg])
1655 curreg = (curreg + 1) % BPF_MEMWORDS;
1656 else {
1657 regused[curreg] = 1;
1658 return curreg;
1659 }
1660 }
1661 bpf_error("too many registers needed to evaluate expression");
1662 /* NOTREACHED */
1663 }
1664
1665 /*
1666 * Return a register to the table so it can
1667 * be used later.
1668 */
1669 static void
1670 free_reg(n)
1671 int n;
1672 {
1673 regused[n] = 0;
1674 }
1675
1676 static struct block *
1677 gen_len(jmp, n)
1678 int jmp, n;
1679 {
1680 struct slist *s;
1681 struct block *b;
1682
1683 s = new_stmt(BPF_LD|BPF_LEN);
1684 b = new_block(JMP(jmp));
1685 b->stmts = s;
1686 b->s.k = n;
1687
1688 return b;
1689 }
1690
1691 struct block *
1692 gen_greater(n)
1693 int n;
1694 {
1695 return gen_len(BPF_JGE, n);
1696 }
1697
1698 struct block *
1699 gen_less(n)
1700 int n;
1701 {
1702 struct block *b;
1703
1704 b = gen_len(BPF_JGT, n);
1705 gen_not(b);
1706
1707 return b;
1708 }
1709
1710 struct block *
1711 gen_byteop(op, idx, val)
1712 int op, idx, val;
1713 {
1714 struct block *b;
1715 struct slist *s;
1716
1717 switch (op) {
1718 default:
1719 abort();
1720
1721 case '=':
1722 return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1723
1724 case '<':
1725 b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1726 b->s.code = JMP(BPF_JGE);
1727 gen_not(b);
1728 return b;
1729
1730 case '>':
1731 b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1732 b->s.code = JMP(BPF_JGT);
1733 return b;
1734
1735 case '|':
1736 s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
1737 break;
1738
1739 case '&':
1740 s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
1741 break;
1742 }
1743 s->s.k = val;
1744 b = new_block(JMP(BPF_JEQ));
1745 b->stmts = s;
1746 gen_not(b);
1747
1748 return b;
1749 }
1750
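/*
 * Generate code for the 'broadcast' qualifier.  With no protocol qualifier,
 * or with 'ether'/'link', this matches the link-level broadcast address;
 * with 'ip' it matches a destination address whose host part, under the
 * netmask handed to pcap_compile(), is either all zeroes or all ones.
 */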
1751 struct block *
1752 gen_broadcast(proto)
1753 int proto;
1754 {
1755 bpf_u_int32 hostmask;
1756 struct block *b0, *b1, *b2;
1757 static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1758
1759 switch (proto) {
1760
1761 case Q_DEFAULT:
1762 case Q_LINK:
1763 if (linktype == DLT_EN10MB)
1764 return gen_ehostop(ebroadcast, Q_DST);
1765 if (linktype == DLT_FDDI)
1766 return gen_fhostop(ebroadcast, Q_DST);
1767 bpf_error("not a broadcast link");
1768 break;
1769
1770 case Q_IP:
1771 b0 = gen_linktype(ETHERTYPE_IP);
1772 hostmask = ~netmask;
1773 b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
1774 b2 = gen_mcmp(off_nl + 16, BPF_W,
1775 (bpf_int32)(~0 & hostmask), hostmask);
1776 gen_or(b1, b2);
1777 gen_and(b0, b2);
1778 return b2;
1779 }
1780 bpf_error("only ether/ip broadcast filters supported");
1781 }
1782
1783 struct block *
1784 gen_multicast(proto)
1785 int proto;
1786 {
1787 register struct block *b0, *b1;
1788 register struct slist *s;
1789
1790 switch (proto) {
1791
1792 case Q_DEFAULT:
1793 case Q_LINK:
1794 if (linktype == DLT_EN10MB) {
1795 /* ether[0] & 1 != 0 */
1796 s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1797 s->s.k = 0;
1798 b0 = new_block(JMP(BPF_JSET));
1799 b0->s.k = 1;
1800 b0->stmts = s;
1801 return b0;
1802 }
1803
1804 if (linktype == DLT_FDDI) {
1805 /* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
1806 /* fddi[1] & 1 != 0 */
1807 s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1808 s->s.k = 1;
1809 b0 = new_block(JMP(BPF_JSET));
1810 b0->s.k = 1;
1811 b0->stmts = s;
1812 return b0;
1813 }
1814 /* Link not known to support multicasts */
1815 break;
1816
1817 case Q_IP:
1818 b0 = gen_linktype(ETHERTYPE_IP);
1819 b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
1820 b1->s.code = JMP(BPF_JGE);
1821 gen_and(b0, b1);
1822 return b1;
1823 }
1824 bpf_error("only IP multicast filters supported on ethernet/FDDI");
1825 }
1826
1827 /*
1828 * generate command for inbound/outbound. It's here so we can
1829  * Generate code for inbound/outbound.  It's here so we can
1830 * = 1 implies "outbound".
1831 */
1832 struct block *
1833 gen_inbound(dir)
1834 int dir;
1835 {
1836 register struct block *b0;
1837
1838 b0 = gen_relation(BPF_JEQ,
1839 gen_load(Q_LINK, gen_loadi(0), 1),
1840 gen_loadi(0),
1841 dir);
1842 return (b0);
1843 }