/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 *
 * $FreeBSD: src/sys/net/bpf_filter.c,v 1.17 1999/12/29 04:38:31 peter Exp $
 */

#include <sys/param.h>
#include <string.h>

#ifdef sun
#include <netinet/in.h>
#endif

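/*
 * BPF_ALIGN is nonzero on architectures where the filter cannot safely
 * perform unaligned 16- or 32-bit loads from packet data and must fall
 * back to byte-at-a-time extraction.
 */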
#if !defined(__i386__) && !defined(__x86_64__)
#define BPF_ALIGN 1
#else /* defined(__i386__) || defined(__x86_64__) */
#define BPF_ALIGN 0
#endif /* defined(__i386__) || defined(__x86_64__) */

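/*
 * EXTRACT_SHORT and EXTRACT_LONG fetch a 16- or 32-bit value in network
 * byte order from packet memory at p and convert it to host byte order.
 * The BPF_ALIGN variants assemble the value byte by byte so that no
 * unaligned access is ever issued.
 */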
#if !BPF_ALIGN
#define EXTRACT_SHORT(p)	((u_int16_t)ntohs(*(u_int16_t *)(void *)p))
#define EXTRACT_LONG(p)		(ntohl(*(u_int32_t *)(void *)p))
#else
#define EXTRACT_SHORT(p)\
	((u_int16_t)\
		((u_int16_t)*((u_char *)p+0)<<8|\
		 (u_int16_t)*((u_char *)p+1)<<0))
#define EXTRACT_LONG(p)\
		((u_int32_t)*((u_char *)p+0)<<24|\
		 (u_int32_t)*((u_char *)p+1)<<16|\
		 (u_int32_t)*((u_char *)p+2)<<8|\
		 (u_int32_t)*((u_char *)p+3)<<0)
#endif

#ifdef KERNEL
#include <sys/mbuf.h>
#endif
#include <net/bpf.h>
#ifdef KERNEL
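/*
 * MINDEX(m, k) walks the mbuf chain starting at m until the byte at
 * offset k falls within the current mbuf, adjusting k to be relative to
 * that mbuf.  If the chain is exhausted first, the enclosing filter
 * routine returns 0 (reject).
 */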
#define MINDEX(m, k) \
{ \
	unsigned int len = m->m_len; \
 \
	while (k >= len) { \
		k -= len; \
		m = m->m_next; \
		if (m == 0) \
			return 0; \
		len = m->m_len; \
	} \
}

extern unsigned int bpf_maxbufsize;

static u_int16_t m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err);
static u_int32_t m_xword(struct mbuf *m, bpf_u_int32 k, int *err);

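/*
 * m_xword returns the 32-bit word at offset k into the mbuf chain m,
 * reassembling it byte by byte when the word straddles into the next
 * mbuf.  If the offset is past the end of the chain, or the word cannot
 * be assembled from the current and next mbufs, *err is set to 1 and 0
 * is returned; otherwise *err is set to 0.
 */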
static u_int32_t
m_xword(struct mbuf *m, bpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp, *np;
	struct mbuf *m0;

	len = m->m_len;
	while (k >= len) {
		k -= len;
		m = m->m_next;
		if (m == 0)
			goto bad;
		len = m->m_len;
	}
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		goto bad;
	*err = 0;
	np = mtod(m0, u_char *);
	switch (len - k) {

	case 1:
		return
		    ((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)np[0] << 16) |
		    ((u_int32_t)np[1] << 8) |
		    (u_int32_t)np[2];

	case 2:
		return
		    ((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)cp[1] << 16) |
		    ((u_int32_t)np[0] << 8) |
		    (u_int32_t)np[1];

	default:
		return
		    ((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)cp[1] << 16) |
		    ((u_int32_t)cp[2] << 8) |
		    (u_int32_t)np[0];
	}
bad:
	*err = 1;
	return 0;
}

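/*
 * m_xhalf is the 16-bit counterpart of m_xword: it returns the halfword
 * at offset k into the mbuf chain m, assembling it from two mbufs when
 * necessary, and reports failure through *err.
 */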
static u_int16_t
m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp;
	struct mbuf *m0;

	len = m->m_len;
	while (k >= len) {
		k -= len;
		m = m->m_next;
		if (m == 0)
			goto bad;
		len = m->m_len;
	}
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		goto bad;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
bad:
	*err = 1;
	return 0;
}
#endif

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount of
 * data present in the buffer.  In the kernel, a buflen of 0 indicates
 * that p is really a pointer to an mbuf chain rather than a contiguous
 * buffer, and out-of-buffer loads go through m_xword/m_xhalf/MINDEX.
 */
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	u_int32_t A = 0, X = 0;
	bpf_u_int32 k;
	int32_t mem[BPF_MEMWORDS];

	bzero(mem, sizeof(mem));

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;

	--pc;
	while (1) {
		++pc;
		switch (pc->code) {

		default:
#ifdef KERNEL
			return 0;
#else
			abort();
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((struct mbuf *)(void *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
#if BPF_ALIGN
			if (((intptr_t)(p + k) & 3) != 0)
				A = EXTRACT_LONG(&p[k]);
			else
#endif
				A = ntohl(*(int32_t *)(void *)(p + k));
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((struct mbuf *)(void *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return 0;
				m = (struct mbuf *)(void *)p;
				MINDEX(m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((struct mbuf *)(void *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
#if BPF_ALIGN
			if (((intptr_t)(p + k) & 3) != 0)
				A = EXTRACT_LONG(&p[k]);
			else
#endif
				A = ntohl(*(int32_t *)(void *)(p + k));
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
#ifdef KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((struct mbuf *)(void *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return 0;
				m = (struct mbuf *)(void *)p;
				MINDEX(m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return 0;
				m = (struct mbuf *)(void *)p;
				MINDEX(m, k);
				X = (mtod(m, u_char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			if (pc->k >= BPF_MEMWORDS)
				return 0;
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			if (pc->k >= BPF_MEMWORDS)
				return 0;
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}
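
/*
 * Illustrative sketch: a minimal program for bpf_filter() that accepts
 * IPv4 Ethernet frames and rejects everything else, run against a
 * contiguous buffer (the buflen != 0 path).  The helper name and the
 * EtherType offset of 12 are assumptions made for the example only.
 */
#if 0
static u_int
example_accept_ipv4(u_char *pkt, u_int pktlen)
{
	static const struct bpf_insn prog[] = {
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		/* A <- EtherType */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),	/* IPv4? */
		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),		/* accept whole packet */
		BPF_STMT(BPF_RET|BPF_K, 0),			/* reject */
	};

	/* Returns the snap length ((u_int)-1 here) on a match, 0 otherwise. */
	return bpf_filter(prog, pkt, pktlen, pktlen);
}
#endif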

#ifdef KERNEL
/*
 * Return true if 'f' is a valid filter program.
 * The constraints are that each jump be forward and land on a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet data
 * have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
int
bpf_validate(const struct bpf_insn *f, int len)
{
	u_int i, from;
	const struct bpf_insn *p;

	if (len < 1 || len > BPF_MAXINSNS)
		return 0;

	for (i = 0; i < ((u_int)len); ++i) {
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * A stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= bpf_maxbufsize)
					return 0;
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is
			 * less than or equal to BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is sufficiently small
			 * that adding 255 to it won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is less than the maximum
			 * size of a u_int, so that i+1 doesn't overflow.
			 */
			from = i+1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k < from || from + p->k >= ((u_int)len))
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= ((u_int)len) || from + p->jf >= ((u_int)len))
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}
	return BPF_CLASS(f[len - 1].code) == BPF_RET;
}
#endif
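
/*
 * Illustrative sketch: how a filter program handed in from user space
 * would be checked with bpf_validate() before being accepted; the helper
 * name and return convention are assumptions made for the example only.
 */
#if 0
static int
example_install(const struct bpf_insn *insns, int ninsns)
{
	/* Reject programs with bad jumps, memory offsets, or constant divide-by-zero. */
	if (!bpf_validate(insns, ninsns))
		return -1;
	/* ... the accepted program can later be run with bpf_filter() ... */
	return 0;
}
#endif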