/*
 * Retrieved from the git.saurik.com mirror of apple/xnu
 * (release xnu-1228.7.58), file bsd/netinet6/esp_rijndael.c.
 */
1 /* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
2 /* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/socket.h>
36 #include <sys/queue.h>
37 #include <sys/syslog.h>
38 #include <sys/mbuf.h>
39
40 #include <kern/locks.h>
41
42 #include <net/if.h>
43 #include <net/route.h>
44
45 #include <netinet6/ipsec.h>
46 #include <netinet6/esp.h>
47 #include <netinet6/esp_rijndael.h>
48
49 #include <crypto/aes/aes.h>
50
51 #include <netkey/key.h>
52
53 #include <net/net_osdep.h>
54
55 #define AES_BLOCKLEN 16
56
57 extern lck_mtx_t *sadb_mutex;
58
59 int
60 esp_aes_schedlen(
61 __unused const struct esp_algorithm *algo)
62 {
63
64 return sizeof(aes_ctx);
65 }
66
67 int
68 esp_aes_schedule(
69 __unused const struct esp_algorithm *algo,
70 struct secasvar *sav)
71 {
72
73 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
74 aes_ctx *ctx = (aes_ctx*)sav->sched;
75
76 aes_decrypt_key(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
77 aes_encrypt_key(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
78
79 return 0;
80 }
81
82
83 /* The following 2 functions decrypt or encrypt the contents of
84 * the mbuf chain passed in keeping the IP and ESP header's in place,
85 * along with the IV.
86 * The code attempts to call the crypto code with the largest chunk
87 * of data it can based on the amount of source data in
88 * the current source mbuf and the space remaining in the current
89 * destination mbuf. The crypto code requires data to be a multiples
90 * of 16 bytes. A separate buffer is used when a 16 byte block spans
91 * mbufs.
92 *
93 * m = mbuf chain
94 * off = offset to ESP header
95 *
96 * local vars for source:
97 * soff = offset from beginning of the chain to the head of the
98 * current mbuf.
99 * scut = last mbuf that contains headers to be retained
100 * scutoff = offset to end of the headers in scut
101 * s = the current mbuf
102 * sn = current offset to data in s (next source data to process)
103 *
104 * local vars for dest:
105 * d0 = head of chain
106 * d = current mbuf
107 * dn = current offset in d (next location to store result)
108 */
109
110
111 int
112 esp_cbc_decrypt_aes(m, off, sav, algo, ivlen)
113 struct mbuf *m;
114 size_t off;
115 struct secasvar *sav;
116 const struct esp_algorithm *algo;
117 int ivlen;
118 {
119 struct mbuf *s;
120 struct mbuf *d, *d0, *dp;
121 int soff; /* offset from the head of chain, to head of this mbuf */
122 int sn, dn; /* offset from the head of the mbuf, to meat */
123 size_t ivoff, bodyoff;
124 u_int8_t iv[AES_BLOCKLEN], *dptr;
125 u_int8_t sbuf[AES_BLOCKLEN], *sp;
126 struct mbuf *scut;
127 int scutoff;
128 int i, len;
129
130
131 if (ivlen != AES_BLOCKLEN) {
132 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
133 "unsupported ivlen %d\n", algo->name, ivlen));
134 m_freem(m);
135 return EINVAL;
136 }
137
138 if (sav->flags & SADB_X_EXT_OLD) {
139 /* RFC 1827 */
140 ivoff = off + sizeof(struct esp);
141 bodyoff = off + sizeof(struct esp) + ivlen;
142 } else {
143 ivoff = off + sizeof(struct newesp);
144 bodyoff = off + sizeof(struct newesp) + ivlen;
145 }
146
147 if (m->m_pkthdr.len < bodyoff) {
148 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
149 algo->name, m->m_pkthdr.len, (unsigned long)bodyoff));
150 m_freem(m);
151 return EINVAL;
152 }
153 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
154 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
155 "payload length must be multiple of %d\n",
156 algo->name, AES_BLOCKLEN));
157 m_freem(m);
158 return EINVAL;
159 }
160
161 /* grab iv */
162 m_copydata(m, ivoff, ivlen, iv);
163
164 s = m;
165 soff = sn = dn = 0;
166 d = d0 = dp = NULL;
167 sp = dptr = NULL;
168
169 /* skip header/IV offset */
170 while (soff < bodyoff) {
171 if (soff + s->m_len > bodyoff) {
172 sn = bodyoff - soff;
173 break;
174 }
175
176 soff += s->m_len;
177 s = s->m_next;
178 }
179 scut = s;
180 scutoff = sn;
181
182 /* skip over empty mbuf */
183 while (s && s->m_len == 0)
184 s = s->m_next;
185
186 while (soff < m->m_pkthdr.len) {
187 /* source */
188 if (sn + AES_BLOCKLEN <= s->m_len) {
189 /* body is continuous */
190 sp = mtod(s, u_int8_t *) + sn;
191 len = s->m_len - sn;
192 len -= len % AES_BLOCKLEN; // full blocks only
193 } else {
194 /* body is non-continuous */
195 m_copydata(s, sn, AES_BLOCKLEN, sbuf);
196 sp = sbuf;
197 len = AES_BLOCKLEN; // 1 block only in sbuf
198 }
199
200 /* destination */
201 if (!d || dn + AES_BLOCKLEN > d->m_len) {
202 if (d)
203 dp = d;
204 MGET(d, M_DONTWAIT, MT_DATA);
205 i = m->m_pkthdr.len - (soff + sn);
206 if (d && i > MLEN) {
207 MCLGET(d, M_DONTWAIT);
208 if ((d->m_flags & M_EXT) == 0) {
209 d = m_mbigget(d, M_DONTWAIT);
210 if ((d->m_flags & M_EXT) == 0) {
211 m_free(d);
212 d = NULL;
213 }
214 }
215 }
216 if (!d) {
217 m_freem(m);
218 if (d0)
219 m_freem(d0);
220 return ENOBUFS;
221 }
222 if (!d0)
223 d0 = d;
224 if (dp)
225 dp->m_next = d;
226 d->m_len = M_TRAILINGSPACE(d);
227 d->m_len -= d->m_len % AES_BLOCKLEN;
228 if (d->m_len > i)
229 d->m_len = i;
230 dptr = mtod(d, u_int8_t *);
231 dn = 0;
232 }
233
234 /* adjust len if greater than space available in dest */
235 if (len > d->m_len - dn)
236 len = d->m_len - dn;
237
238 /* decrypt */
239 aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
240 (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));
241
242 /* udpate offsets */
243 sn += len;
244 dn += len;
245
246 // next iv
247 bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
248
249 /* find the next source block */
250 while (s && sn >= s->m_len) {
251 sn -= s->m_len;
252 soff += s->m_len;
253 s = s->m_next;
254 }
255
256 }
257
258 /* free un-needed source mbufs and add dest mbufs to chain */
259 m_freem(scut->m_next);
260 scut->m_len = scutoff;
261 scut->m_next = d0;
262
263 /* just in case */
264 bzero(iv, sizeof(iv));
265 bzero(sbuf, sizeof(sbuf));
266
267 return 0;
268 }
269
270 int
271 esp_cbc_encrypt_aes(
272 struct mbuf *m,
273 size_t off,
274 __unused size_t plen,
275 struct secasvar *sav,
276 const struct esp_algorithm *algo,
277 int ivlen)
278 {
279 struct mbuf *s;
280 struct mbuf *d, *d0, *dp;
281 int soff; /* offset from the head of chain, to head of this mbuf */
282 int sn, dn; /* offset from the head of the mbuf, to meat */
283 size_t ivoff, bodyoff;
284 u_int8_t *ivp, *dptr;
285 u_int8_t sbuf[AES_BLOCKLEN], *sp;
286 struct mbuf *scut;
287 int scutoff;
288 int i, len;
289
290 if (ivlen != AES_BLOCKLEN) {
291 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
292 "unsupported ivlen %d\n", algo->name, ivlen));
293 m_freem(m);
294 return EINVAL;
295 }
296
297 if (sav->flags & SADB_X_EXT_OLD) {
298 /* RFC 1827 */
299 ivoff = off + sizeof(struct esp);
300 bodyoff = off + sizeof(struct esp) + ivlen;
301 } else {
302 ivoff = off + sizeof(struct newesp);
303 bodyoff = off + sizeof(struct newesp) + ivlen;
304 }
305
306 /* put iv into the packet */
307 m_copyback(m, ivoff, ivlen, sav->iv);
308 ivp = sav->iv;
309
310 if (m->m_pkthdr.len < bodyoff) {
311 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
312 algo->name, m->m_pkthdr.len, (unsigned long)bodyoff));
313 m_freem(m);
314 return EINVAL;
315 }
316 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
317 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
318 "payload length must be multiple of %lu\n",
319 algo->name, AES_BLOCKLEN));
320 m_freem(m);
321 return EINVAL;
322 }
323
324 s = m;
325 soff = sn = dn = 0;
326 d = d0 = dp = NULL;
327 sp = dptr = NULL;
328
329 /* skip headers/IV */
330 while (soff < bodyoff) {
331 if (soff + s->m_len > bodyoff) {
332 sn = bodyoff - soff;
333 break;
334 }
335
336 soff += s->m_len;
337 s = s->m_next;
338 }
339 scut = s;
340 scutoff = sn;
341
342 /* skip over empty mbuf */
343 while (s && s->m_len == 0)
344 s = s->m_next;
345
346 while (soff < m->m_pkthdr.len) {
347 /* source */
348 if (sn + AES_BLOCKLEN <= s->m_len) {
349 /* body is continuous */
350 sp = mtod(s, u_int8_t *) + sn;
351 len = s->m_len - sn;
352 len -= len % AES_BLOCKLEN; // full blocks only
353 } else {
354 /* body is non-continuous */
355 m_copydata(s, sn, AES_BLOCKLEN, sbuf);
356 sp = sbuf;
357 len = AES_BLOCKLEN; // 1 block only in sbuf
358 }
359
360 /* destination */
361 if (!d || dn + AES_BLOCKLEN > d->m_len) {
362 if (d)
363 dp = d;
364 MGET(d, M_DONTWAIT, MT_DATA);
365 i = m->m_pkthdr.len - (soff + sn);
366 if (d && i > MLEN) {
367 MCLGET(d, M_DONTWAIT);
368 if ((d->m_flags & M_EXT) == 0) {
369 d = m_mbigget(d, M_DONTWAIT);
370 if ((d->m_flags & M_EXT) == 0) {
371 m_free(d);
372 d = NULL;
373 }
374 }
375 }
376 if (!d) {
377 m_freem(m);
378 if (d0)
379 m_freem(d0);
380 return ENOBUFS;
381 }
382 if (!d0)
383 d0 = d;
384 if (dp)
385 dp->m_next = d;
386
387 d->m_len = M_TRAILINGSPACE(d);
388 d->m_len -= d->m_len % AES_BLOCKLEN;
389 if (d->m_len > i)
390 d->m_len = i;
391 dptr = mtod(d, u_int8_t *);
392 dn = 0;
393 }
394
395 /* adjust len if greater than space available */
396 if (len > d->m_len - dn)
397 len = d->m_len - dn;
398
399 /* encrypt */
400 aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
401 (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
402
403 /* update offsets */
404 sn += len;
405 dn += len;
406
407 /* next iv */
408 ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted
409
410 /* find the next source block and skip empty mbufs */
411 while (s && sn >= s->m_len) {
412 sn -= s->m_len;
413 soff += s->m_len;
414 s = s->m_next;
415 }
416
417 }
418
419 /* free un-needed source mbufs and add dest mbufs to chain */
420 m_freem(scut->m_next);
421 scut->m_len = scutoff;
422 scut->m_next = d0;
423
424 /* just in case */
425 bzero(sbuf, sizeof(sbuf));
426 key_sa_stir_iv(sav);
427
428 return 0;
429 }