]> git.saurik.com Git - apple/xnu.git/blame - bsd/netinet6/esp_rijndael.c
xnu-2050.7.9.tar.gz
[apple/xnu.git] / bsd / netinet6 / esp_rijndael.c
CommitLineData
b0d623f7
A
1/*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
9bccf70c
A
29/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
30/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/socket.h>
64#include <sys/queue.h>
91447636
A
65#include <sys/syslog.h>
66#include <sys/mbuf.h>
316670eb 67#include <sys/mcache.h>
91447636
A
68
69#include <kern/locks.h>
9bccf70c
A
70
71#include <net/if.h>
72#include <net/route.h>
73
74#include <netinet6/ipsec.h>
75#include <netinet6/esp.h>
76#include <netinet6/esp_rijndael.h>
77
316670eb 78#include <libkern/crypto/aes.h>
9bccf70c 79
2d21ac55
A
80#include <netkey/key.h>
81
9bccf70c
A
82#include <net/net_osdep.h>
83
91447636 84#define AES_BLOCKLEN 16
316670eb 85#define MAX_SBUF_LEN 2000
91447636
A
86
87extern lck_mtx_t *sadb_mutex;
88
9bccf70c 89int
2d21ac55
A
90esp_aes_schedlen(
91 __unused const struct esp_algorithm *algo)
9bccf70c
A
92{
93
91447636 94 return sizeof(aes_ctx);
9bccf70c
A
95}
96
97int
2d21ac55
A
98esp_aes_schedule(
99 __unused const struct esp_algorithm *algo,
100 struct secasvar *sav)
9bccf70c 101{
2d21ac55
A
102
103 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
91447636
A
104 aes_ctx *ctx = (aes_ctx*)sav->sched;
105
b0d623f7
A
106 aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
107 aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
91447636 108
9bccf70c
A
109 return 0;
110}
111
91447636
A
112
113/* The following 2 functions decrypt or encrypt the contents of
114 * the mbuf chain passed in keeping the IP and ESP header's in place,
115 * along with the IV.
116 * The code attempts to call the crypto code with the largest chunk
117 * of data it can based on the amount of source data in
118 * the current source mbuf and the space remaining in the current
 119 * destination mbuf. The crypto code requires data to be a multiple
120 * of 16 bytes. A separate buffer is used when a 16 byte block spans
121 * mbufs.
122 *
123 * m = mbuf chain
124 * off = offset to ESP header
125 *
126 * local vars for source:
127 * soff = offset from beginning of the chain to the head of the
128 * current mbuf.
129 * scut = last mbuf that contains headers to be retained
130 * scutoff = offset to end of the headers in scut
131 * s = the current mbuf
132 * sn = current offset to data in s (next source data to process)
133 *
134 * local vars for dest:
135 * d0 = head of chain
136 * d = current mbuf
137 * dn = current offset in d (next location to store result)
138 */
139
140
/*
 * AES-CBC decrypt the ESP payload of mbuf chain m in place (logically):
 * the IP/ESP headers and IV up to 'bodyoff' are kept on the original
 * chain; the decrypted body is built into a freshly allocated mbuf
 * chain (d0) which is spliced in after 'scut', replacing the encrypted
 * source mbufs.
 *
 * m     = mbuf chain (consumed/freed on error)
 * off   = offset to the ESP header
 * ivlen = must be AES_BLOCKLEN (16)
 *
 * Returns 0 on success; EINVAL on bad IV length / short or misaligned
 * payload; ENOBUFS if a destination mbuf cannot be allocated.  On any
 * error m has already been freed.
 */
int
esp_cbc_decrypt_aes(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	struct secasvar *sav;
	const struct esp_algorithm *algo;
	int ivlen;
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[MAX_SBUF_LEN] __attribute__((aligned(4))), *sp, *sp_unaligned;
	struct mbuf *scut;
	int scutoff;
	int i, len;


	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	/* old-style (RFC 1827) ESP has no replay-sequence field */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	/* CBC ciphertext must be an exact number of 16-byte blocks */
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	/* grab iv */
	m_copydata(m, ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	/* scut/scutoff: last mbuf (and offset) of headers to retain */
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;	// full blocks only
		} else {
			/* body is non-continuous: gather one block spanning
			 * mbufs into the bounce buffer */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;		// 1 block only in sbuf
		}

		/* destination: allocate a new mbuf when the current one
		 * cannot hold another whole block */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);	/* bytes remaining */
			if (d && i > MLEN) {
				/* try a 2K cluster, then a 4K (big) cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			/* use all trailing space, rounded down to whole blocks */
			d->m_len = M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i)
				d->m_len = i;
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			sp = sbuf;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));

		// update unaligned pointers
		// (IPSEC_IS_P2ALIGNED(NULL) is true, so this is a no-op when
		// no bounce copy was made)
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		// next iv: last ciphertext block just consumed (CBC chaining)
		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}

	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	/* just in case: scrub key-stream-related material from the stack */
	bzero(iv, sizeof(iv));
	bzero(sbuf, sizeof(sbuf));

	return 0;
}
319
320int
2d21ac55
A
321esp_cbc_encrypt_aes(
322 struct mbuf *m,
323 size_t off,
324 __unused size_t plen,
325 struct secasvar *sav,
326 const struct esp_algorithm *algo,
327 int ivlen)
9bccf70c 328{
91447636
A
329 struct mbuf *s;
330 struct mbuf *d, *d0, *dp;
2d21ac55 331 int soff; /* offset from the head of chain, to head of this mbuf */
91447636
A
332 int sn, dn; /* offset from the head of the mbuf, to meat */
333 size_t ivoff, bodyoff;
316670eb
A
334 u_int8_t *ivp, *dptr, *ivp_unaligned;
335 u_int8_t sbuf[MAX_SBUF_LEN] __attribute__((aligned(4))), *sp, *sp_unaligned;
336 u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
91447636
A
337 struct mbuf *scut;
338 int scutoff;
339 int i, len;
340
341 if (ivlen != AES_BLOCKLEN) {
342 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
343 "unsupported ivlen %d\n", algo->name, ivlen));
344 m_freem(m);
345 return EINVAL;
346 }
347
348 if (sav->flags & SADB_X_EXT_OLD) {
349 /* RFC 1827 */
350 ivoff = off + sizeof(struct esp);
351 bodyoff = off + sizeof(struct esp) + ivlen;
352 } else {
353 ivoff = off + sizeof(struct newesp);
354 bodyoff = off + sizeof(struct newesp) + ivlen;
355 }
356
357 /* put iv into the packet */
358 m_copyback(m, ivoff, ivlen, sav->iv);
b0d623f7 359 ivp = (u_int8_t *) sav->iv;
91447636
A
360
361 if (m->m_pkthdr.len < bodyoff) {
362 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
b0d623f7 363 algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
91447636
A
364 m_freem(m);
365 return EINVAL;
366 }
367 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
368 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
369 "payload length must be multiple of %lu\n",
370 algo->name, AES_BLOCKLEN));
371 m_freem(m);
372 return EINVAL;
373 }
91447636
A
374
375 s = m;
376 soff = sn = dn = 0;
377 d = d0 = dp = NULL;
378 sp = dptr = NULL;
379
380 /* skip headers/IV */
381 while (soff < bodyoff) {
382 if (soff + s->m_len > bodyoff) {
383 sn = bodyoff - soff;
384 break;
385 }
386
387 soff += s->m_len;
388 s = s->m_next;
389 }
390 scut = s;
391 scutoff = sn;
392
393 /* skip over empty mbuf */
394 while (s && s->m_len == 0)
395 s = s->m_next;
396
397 while (soff < m->m_pkthdr.len) {
398 /* source */
399 if (sn + AES_BLOCKLEN <= s->m_len) {
400 /* body is continuous */
401 sp = mtod(s, u_int8_t *) + sn;
402 len = s->m_len - sn;
403 len -= len % AES_BLOCKLEN; // full blocks only
404 } else {
405 /* body is non-continuous */
b0d623f7 406 m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
91447636
A
407 sp = sbuf;
408 len = AES_BLOCKLEN; // 1 block only in sbuf
409 }
410
411 /* destination */
412 if (!d || dn + AES_BLOCKLEN > d->m_len) {
413 if (d)
414 dp = d;
415 MGET(d, M_DONTWAIT, MT_DATA);
416 i = m->m_pkthdr.len - (soff + sn);
417 if (d && i > MLEN) {
418 MCLGET(d, M_DONTWAIT);
419 if ((d->m_flags & M_EXT) == 0) {
2d21ac55
A
420 d = m_mbigget(d, M_DONTWAIT);
421 if ((d->m_flags & M_EXT) == 0) {
422 m_free(d);
423 d = NULL;
424 }
91447636
A
425 }
426 }
427 if (!d) {
428 m_freem(m);
429 if (d0)
430 m_freem(d0);
91447636
A
431 return ENOBUFS;
432 }
433 if (!d0)
434 d0 = d;
435 if (dp)
436 dp->m_next = d;
437
316670eb
A
438 // try to make mbuf data aligned
439 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
440 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
441 }
442
91447636
A
443 d->m_len = M_TRAILINGSPACE(d);
444 d->m_len -= d->m_len % AES_BLOCKLEN;
445 if (d->m_len > i)
446 d->m_len = i;
447 dptr = mtod(d, u_int8_t *);
448 dn = 0;
449 }
450
451 /* adjust len if greater than space available */
452 if (len > d->m_len - dn)
453 len = d->m_len - dn;
454
455 /* encrypt */
316670eb
A
456 // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
457 if (IPSEC_IS_P2ALIGNED(sp)) {
458 sp_unaligned = NULL;
459 } else {
460 sp_unaligned = sp;
461 sp = sbuf;
462 memcpy(sp, sp_unaligned, len);
463 }
464 // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
465 if (IPSEC_IS_P2ALIGNED(ivp)) {
466 ivp_unaligned = NULL;
467 } else {
468 ivp_unaligned = ivp;
469 ivp = ivp_aligned_buf;
470 memcpy(ivp, ivp_unaligned, len);
471 }
472 // no need to check output pointer alignment
91447636
A
473 aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
474 (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
475
316670eb
A
476 // update unaligned pointers
477 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
478 sp = sp_unaligned;
479 }
480 if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
481 ivp = ivp_unaligned;
482 }
483
91447636
A
484 /* update offsets */
485 sn += len;
486 dn += len;
487
488 /* next iv */
489 ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted
490
491 /* find the next source block and skip empty mbufs */
492 while (s && sn >= s->m_len) {
493 sn -= s->m_len;
494 soff += s->m_len;
495 s = s->m_next;
496 }
497
498 }
499
500 /* free un-needed source mbufs and add dest mbufs to chain */
501 m_freem(scut->m_next);
502 scut->m_len = scutoff;
503 scut->m_next = d0;
504
505 /* just in case */
506 bzero(sbuf, sizeof(sbuf));
91447636
A
507 key_sa_stir_iv(sav);
508
9bccf70c
A
509 return 0;
510}