/*
 * Recovered from the git.saurik.com web view of apple/xnu,
 * bsd/netinet/ip_frag.c (blob ddb4f1ab4fc617dbe8f86686085f643f21c43647).
 * The listing below is extraction-garbled: original line numbers are
 * fused into the text and some source lines are missing.
 */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1993-1997 by Darren Reed.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and due credit is given
 * to the original author and the contributors.
 */
/* static const char sccsid[] = "@(#)ip_frag.c	1.11 3/24/96 (C) 1993-1995 Darren Reed"; */
38 #include <sys/errno.h>
39 #include <sys/types.h>
40 #include <sys/param.h>
44 #include <sys/filio.h>
45 #include <sys/fcntl.h>
46 #include <sys/malloc.h>
48 #include <sys/ioctl.h>
52 #include <sys/protosw.h>
54 #include <sys/socket.h>
56 # include <sys/systm.h>
58 #if !defined(__SVR4) && !defined(__svr4__)
60 # include <sys/mbuf.h>
63 # include <sys/byteorder.h>
64 # include <sys/dditypes.h>
65 # include <sys/stream.h>
66 # include <sys/kmem.h>
69 #include <sys/malloc.h>
76 #include <net/route.h>
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/ip.h>
81 #include <netinet/ip_var.h>
83 #include <netinet/tcp.h>
84 #include <netinet/udp.h>
85 #include <netinet/ip_icmp.h>
86 #include "netinet/ip_compat.h"
87 #include <netinet/tcpip.h>
88 #include "netinet/ip_fil.h"
89 #include "netinet/ip_proxy.h"
90 #include "netinet/ip_nat.h"
91 #include "netinet/ip_frag.h"
92 #include "netinet/ip_state.h"
93 #include "netinet/ip_auth.h"
95 static ipfr_t
*ipfr_heads
[IPFT_SIZE
];
96 static ipfr_t
*ipfr_nattab
[IPFT_SIZE
];
97 static ipfrstat_t ipfr_stats
;
98 static int ipfr_inuse
= 0;
99 int fr_ipfrttl
= 120; /* 60 seconds */
101 extern int ipfr_timer_id
;
103 #if (SOLARIS || defined(__sgi)) && defined(KERNEL)
104 extern kmutex_t ipf_frag
;
105 extern kmutex_t ipf_natfrag
;
106 extern kmutex_t ipf_nat
;
110 static ipfr_t
*ipfr_new
__P((ip_t
*, fr_info_t
*, int, ipfr_t
**));
111 static ipfr_t
*ipfr_lookup
__P((ip_t
*, fr_info_t
*, ipfr_t
**));
/*
 * ipfr_fragstats - export fragment-cache statistics: records the two
 * hash-table pointers and the current in-use count in ipfr_stats
 * (presumably returning &ipfr_stats; the return is not visible here).
 *
 * NOTE(review): extraction-garbled listing.  The stray leading numbers
 * are the original file's line numbers fused into the text; the
 * function's braces and return statement (orig. lines 115, 119-121)
 * were dropped by the extractor.  Restore from a pristine ip_frag.c.
 */
114 ipfrstat_t
*ipfr_fragstats()
/* Publish table pointers so callers of this stats hook can walk the caches. */
116 ipfr_stats
.ifs_table
= ipfr_heads
;
117 ipfr_stats
.ifs_nattab
= ipfr_nattab
;
118 ipfr_stats
.ifs_inuse
= ipfr_inuse
;
/*
 * ipfr_new - add a new entry for packet `ip' to fragment table `table',
 * keyed on (protocol, id, tos, src, dst), caching the filter verdict
 * `pass' so later fragments of the same datagram get the same result.
 *
 * NOTE(review): extraction-garbled listing.  Missing from this view:
 * the K&R parameter declarations, the `idx' hash local and its early
 * accumulation steps (orig. ~128-139), the fold of `idx' into the
 * table range (orig. 145-148), the bcmp length argument and the
 * "already exists" early return (orig. 153-157), the KMALLOC failure
 * branch body (orig. 163-166), the table[idx] head insertion line, and
 * the final return.  Do not compile this fragment as-is.
 */
124 * add a new entry to the fragment cache, registering it as having come
125 * through this box, with the result of the filter operation.
127 static ipfr_t
*ipfr_new(ip
, fin
, pass
, table
)
133 ipfr_t
**fp
, *fr
, frag
;
/* Fill a stack template `frag' with the key fields; it is used both
 * for the duplicate check (bcmp) and the final bcopy into `fr'. */
136 frag
.ipfr_p
= ip
->ip_p
;
138 frag
.ipfr_id
= ip
->ip_id
;
140 frag
.ipfr_tos
= ip
->ip_tos
;
141 frag
.ipfr_src
.s_addr
= ip
->ip_src
.s_addr
;
/* `idx' accumulates a hash of the key fields (declaration + earlier
 * accumulation steps are among the lines dropped by the extractor). */
142 idx
+= ip
->ip_src
.s_addr
;
143 frag
.ipfr_dst
.s_addr
= ip
->ip_dst
.s_addr
;
144 idx
+= ip
->ip_dst
.s_addr
;
149 * first, make sure it isn't already there...
/* Walk the hash chain; `fp' tracks the link so insertion stays O(1). */
151 for (fp
= &table
[idx
]; (fr
= *fp
); fp
= &fr
->ipfr_next
)
152 if (!bcmp((char *)&frag
.ipfr_src
, (char *)&fr
->ipfr_src
,
/* Duplicate key found: count it (early return lost to garbling). */
154 ipfr_stats
.ifs_exists
++;
159 * allocate some memory, if possible, if not, just record that we
162 KMALLOC(fr
, ipfr_t
*, sizeof(*fr
));
/* Allocation-failure path: bump the no-memory counter. */
164 ipfr_stats
.ifs_nomem
++;
169 * Instert the fragment into the fragment table, copy the struct used
170 * in the search using bcopy rather than reassign each field.
171 * Set the ttl to the default and mask out logging from "pass"
/* Link the new entry at the head of the doubly-linked chain. */
173 if ((fr
->ipfr_next
= table
[idx
]))
174 table
[idx
]->ipfr_prev
= fr
;
175 fr
->ipfr_prev
= NULL
;
176 fr
->ipfr_data
= NULL
;
178 bcopy((char *)&frag
.ipfr_src
, (char *)&fr
->ipfr_src
, IPFR_CMPSZ
);
/* fr_ipfrttl is in slow-timer ticks (120 ticks = 60s at 2 ticks/sec). */
179 fr
->ipfr_ttl
= fr_ipfrttl
;
/* Cache the verdict but strip per-packet logging flags. */
180 fr
->ipfr_pass
= pass
& ~(FR_LOGFIRST
|FR_LOG
);
182 * Compute the offset of the expected start of the next packet.
/* ip_off low 13 bits = fragment offset (8-byte units); add payload len/8. */
184 fr
->ipfr_off
= (ip
->ip_off
& 0x1fff) + (fin
->fin_dlen
>> 3);
185 ipfr_stats
.ifs_new
++;
/*
 * ipfr_newfrag - public wrapper: record a new fragment in the plain
 * filter fragment table (ipfr_heads) under the ipf_frag mutex.
 * NOTE(review): garbled listing -- the parameter declarations, the
 * `ipf' local, and the return statement (orig. lines 192-197, 201-202)
 * are missing from this view.
 */
191 int ipfr_newfrag(ip
, fin
, pass
)
198 MUTEX_ENTER(&ipf_frag
);
199 ipf
= ipfr_new(ip
, fin
, pass
, ipfr_heads
);
200 MUTEX_EXIT(&ipf_frag
);
/*
 * ipfr_nat_newfrag - like ipfr_newfrag but for NAT'd packets: insert
 * into ipfr_nattab under ipf_natfrag and, on success, stash the NAT
 * session pointer in the entry so later fragments can reuse it.
 * NOTE(review): garbled listing -- parameter declarations, the rest of
 * the success branch (orig. 216-217) and the return are missing here.
 */
205 int ipfr_nat_newfrag(ip
, fin
, pass
, nat
)
213 MUTEX_ENTER(&ipf_natfrag
);
214 if ((ipf
= ipfr_new(ip
, fin
, pass
, ipfr_nattab
))) {
/* Back-reference from the fragment entry to its NAT session. */
215 ipf
->ipfr_data
= nat
;
218 MUTEX_EXIT(&ipf_natfrag
);
/*
 * ipfr_lookup - find the cached entry for this fragment in `table',
 * using the same (protocol, id, tos, src, dst) key and hash as
 * ipfr_new(); on a hit, move the entry to the head of its chain and
 * bump the hit counter.
 *
 * NOTE(review): extraction-garbled listing.  Missing from this view:
 * parameter/local declarations (including `frag', `idx', `off',
 * `atoff'), early `idx' accumulation and the fold into table range,
 * the bcmp length argument and hit-branch opening, the body that
 * shrinks the TTL when the in-order last fragment arrives (after orig.
 * 279), and both return statements.  Do not compile as-is.
 */
224 * check the fragment cache to see if there is already a record of this packet
225 * with its filter result known.
227 static ipfr_t
*ipfr_lookup(ip
, fin
, table
)
236 * For fragments, we record protocol, packet id, TOS and both IP#'s
237 * (these should all be the same for all fragments of a packet).
239 * build up a hash value to index the table with.
241 frag
.ipfr_p
= ip
->ip_p
;
243 frag
.ipfr_id
= ip
->ip_id
;
245 frag
.ipfr_tos
= ip
->ip_tos
;
246 frag
.ipfr_src
.s_addr
= ip
->ip_src
.s_addr
;
247 idx
+= ip
->ip_src
.s_addr
;
248 frag
.ipfr_dst
.s_addr
= ip
->ip_dst
.s_addr
;
249 idx
+= ip
->ip_dst
.s_addr
;
254 * check the table, careful to only compare the right amount of data
256 for (f
= table
[idx
]; f
; f
= f
->ipfr_next
)
257 if (!bcmp((char *)&frag
.ipfr_src
, (char *)&f
->ipfr_src
,
/* Hit: if not already at the chain head, unlink and re-link at head
 * (move-to-front) so hot datagrams are found faster next time. */
261 if (f
!= table
[idx
]) {
263 * move fragment info. to the top of the list
264 * to speed up searches.
266 if ((f
->ipfr_prev
->ipfr_next
= f
->ipfr_next
))
267 f
->ipfr_next
->ipfr_prev
= f
->ipfr_prev
;
268 f
->ipfr_next
= table
[idx
];
269 table
[idx
]->ipfr_prev
= f
;
/* Expected offset of the fragment after this one (8-byte units). */
274 atoff
= off
+ (fin
->fin_dlen
>> 3);
276 * If we've follwed the fragments, and this is the
277 * last (in order), shrink expiration time.
279 if ((off
& 0x1fff) == f
->ipfr_off
) {
285 ipfr_stats
.ifs_hits
++;
/*
 * ipfr_nat_knownfrag - NAT-side lookup: fetch the NAT session pointer
 * cached for this fragment from ipfr_nattab, under ipf_natfrag.  When
 * the entry is on its last timer tick, the NAT<->fragment cross links
 * are severed so neither side is left dangling after expiry.
 * NOTE(review): garbled listing -- parameter/local declarations, the
 * NULL check on the lookup result before the `ipf->ipfr_data' read
 * (orig. ~296-304), and the return are missing from this view.
 */
293 * functional interface for NAT lookups of the NAT fragment cache
295 nat_t
*ipfr_nat_knownfrag(ip
, fin
)
302 MUTEX_ENTER(&ipf_natfrag
);
303 ipf
= ipfr_lookup(ip
, fin
, ipfr_nattab
);
305 nat
= ipf
->ipfr_data
;
307 * This is the last fragment for this packet.
/* ipfr_ttl == 1: entry expires on the next slow-timer pass. */
309 if (ipf
->ipfr_ttl
== 1) {
310 nat
->nat_data
= NULL
;
311 ipf
->ipfr_data
= NULL
;
315 MUTEX_EXIT(&ipf_natfrag
);
/*
 * ipfr_knownfrag - filter-side lookup: return the cached pass/block
 * verdict for this fragment from ipfr_heads, or 0 when the datagram
 * is not in the cache.  Runs under the ipf_frag mutex.
 * NOTE(review): garbled listing -- parameter/local declarations and
 * the `return ret;' (orig. ~324-329, 334+) are missing from this view.
 */
321 * functional interface for normal lookups of the fragment cache
323 int ipfr_knownfrag(ip
, fin
)
330 MUTEX_ENTER(&ipf_frag
);
331 ipf
= ipfr_lookup(ip
, fin
, ipfr_heads
);
332 ret
= ipf
? ipf
->ipfr_pass
: 0;
333 MUTEX_EXIT(&ipf_frag
);
/*
 * ipfr_forget - scrub every fragment-cache back-reference to `nat'
 * (called when a NAT session is torn down) so no entry is left with a
 * dangling ipfr_data pointer.
 *
 * NOTE(review): this walks ipfr_heads, yet the only visible writer of
 * ipfr_data-with-a-NAT-pointer is ipfr_nat_newfrag(), which inserts
 * into ipfr_nattab -- and the lock taken here is ipf_natfrag.  Looks
 * like the loop should scan ipfr_nattab (or both tables); verify
 * against a current ipfilter release before relying on this.
 * Garbled listing: the `void *nat' parameter and the `fr'/`idx' local
 * declarations (orig. 342-346) are missing from this view.
 */
339 * forget any references to this external object.
341 void ipfr_forget(nat
)
347 MUTEX_ENTER(&ipf_natfrag
);
348 for (idx
= IPFT_SIZE
- 1; idx
>= 0; idx
--)
349 for (fr
= ipfr_heads
[idx
]; fr
; fr
= fr
->ipfr_next
)
350 if (fr
->ipfr_data
== nat
)
351 fr
->ipfr_data
= NULL
;
353 MUTEX_EXIT(&ipf_natfrag
);
/*
 * Fragment-table teardown (the signature line is missing from this
 * garbled view; presumably this is ipfr_unload) -- free every entry in
 * both fragment tables.  The plain table is drained under ipf_frag;
 * the NAT table under ipf_nat + ipf_natfrag, clearing the NAT
 * structure's back-pointer (nat_data) when it still points at the
 * entry being freed, so no dangling reference survives.
 * NOTE(review): the loop bodies' unlink/KFREE statements and the
 * function prologue/epilogue (orig. 359-365, 369-371, 378, 382-384)
 * were dropped by the extractor.
 */
358 * Free memory in use by fragment state info. kept.
366 MUTEX_ENTER(&ipf_frag
);
/* Drain the plain filter fragment table. */
367 for (idx
= IPFT_SIZE
- 1; idx
>= 0; idx
--)
368 for (fp
= &ipfr_heads
[idx
]; (fr
= *fp
); ) {
372 MUTEX_EXIT(&ipf_frag
);
/* NAT table next; both locks taken, ipf_nat first (see the deadlock
 * ordering note in ipfr_slowtimer below). */
374 MUTEX_ENTER(&ipf_nat
);
375 MUTEX_ENTER(&ipf_natfrag
);
376 for (idx
= IPFT_SIZE
- 1; idx
>= 0; idx
--)
377 for (fp
= &ipfr_nattab
[idx
]; (fr
= *fp
); ) {
/* Break the NAT -> fragment back-link before freeing the entry. */
379 if ((nat
= (nat_t
*)fr
->ipfr_data
)) {
380 if (nat
->nat_data
== fr
)
381 nat
->nat_data
= NULL
;
385 MUTEX_EXIT(&ipf_natfrag
);
386 MUTEX_EXIT(&ipf_nat
);
/*
 * ipfr_slowtimer - periodic (2x/sec) expiry pass over both fragment
 * tables: each entry's ttl is decremented and entries reaching 0 are
 * unlinked and freed.  For NAT-table entries the NAT structure's
 * back-pointer is cleared first.  On Solaris the routine re-arms
 * itself via timeout() at 500000us, matching the twice-per-second
 * assumption behind fr_ipfrttl = 120 ticks = 60 seconds.
 *
 * NOTE(review): extraction-garbled listing.  Missing: local
 * declarations, the ttl-decrement and unlink/KFREE statements inside
 * both loops (orig. 421, 423, 425-435, 449-467), several #if/#else/
 * #endif lines -- which is why thread_funnel_set(..., FALSE) appears
 * twice below: the two calls sit in different preprocessor arms.  The
 * stray `*' in the first comment line is a garbling artifact.
 */
392 * Slowly expire held state for fragments. Timeouts are set * in expectation
393 * of this being called twice per second.
395 # if (BSD >= 199306) || SOLARIS || defined(__sgi)
396 void ipfr_slowtimer()
397 boolean_t funnel_state
;
/* Take the network funnel for the duration of the pass (Darwin). */
406 funnel_state
= thread_funnel_set(network_flock
, TRUE
);
408 ipfilter_sgi_intfsync();
412 MUTEX_ENTER(&ipf_frag
);
415 * Go through the entire table, looking for entries to expire,
416 * decreasing the ttl by one for each entry. If it reaches 0,
417 * remove it from the chain and free it.
419 for (idx
= IPFT_SIZE
- 1; idx
>= 0; idx
--)
420 for (fp
= &ipfr_heads
[idx
]; (fr
= *fp
); ) {
422 if (fr
->ipfr_ttl
== 0) {
/* Unlink the expired entry from its doubly-linked chain. */
424 fr
->ipfr_prev
->ipfr_next
=
427 fr
->ipfr_next
->ipfr_prev
=
430 ipfr_stats
.ifs_expire
++;
436 MUTEX_EXIT(&ipf_frag
);
439 * Same again for the NAT table, except that if the structure also
440 * still points to a NAT structure, and the NAT structure points back
441 * at the one to be free'd, NULL the reference from the NAT struct.
442 * NOTE: We need to grab both mutex's early, and in this order so as
443 * to prevent a deadlock if both try to expire at the same time.
445 MUTEX_ENTER(&ipf_nat
);
446 MUTEX_ENTER(&ipf_natfrag
);
447 for (idx
= IPFT_SIZE
- 1; idx
>= 0; idx
--)
448 for (fp
= &ipfr_nattab
[idx
]; (fr
= *fp
); ) {
450 if (fr
->ipfr_ttl
== 0) {
452 fr
->ipfr_prev
->ipfr_next
=
455 fr
->ipfr_next
->ipfr_prev
=
458 ipfr_stats
.ifs_expire
++;
/* Sever the NAT back-link before the (garbled-away) free. */
460 if ((nat
= (nat_t
*)fr
->ipfr_data
)) {
461 if (nat
->nat_data
== fr
)
462 nat
->nat_data
= NULL
;
468 MUTEX_EXIT(&ipf_natfrag
);
469 MUTEX_EXIT(&ipf_nat
);
/* Solaris: re-arm the slow timer for 0.5s from now (timeout(9F)). */
475 ipfr_timer_id
= timeout(ipfr_slowtimer
, NULL
, drv_usectohz(500000));
480 # if (BSD < 199306) && !defined(__sgi)
481 (void) thread_funnel_set(network_flock
, FALSE
);
485 (void) thread_funnel_set(network_flock
, FALSE
);
487 #endif /* defined(KERNEL) */