/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>

#include <kern/locks.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>

#include <libkern/crypto/aes.h>

#include <netkey/key.h>

#include <net/net_osdep.h>

#define MAX_REALIGN_LEN		2000
#define AES_BLOCKLEN		16
#define ESP_GCM_SALT_LEN	4	// RFC 4106 Section 4
#define ESP_GCM_IVLEN		8
#define ESP_GCM_ALIGN		16

extern lck_mtx_t *sadb_mutex;

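/*
 * Combined AES-GCM key schedule: the flexible array member ctxt[] holds the
 * decrypt context followed by the encrypt context, and esp_gcm_schedule()
 * points the decrypt/encrypt members into that storage.
 */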
typedef struct {
	ccgcm_ctx *decrypt;
	ccgcm_ctx *encrypt;
	ccgcm_ctx ctxt[0];
} aes_gcm_ctx;

int
esp_aes_schedlen(
	__unused const struct esp_algorithm *algo)
{

	return sizeof(aes_ctx);
}

int
esp_aes_schedule(
	__unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{

	lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_ctx *ctx = (aes_ctx*)sav->sched;

	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

	return 0;
}


/* The following 2 functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can, based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf. The crypto code requires data to be a multiple
 * of 16 bytes. A separate buffer is used when a 16 byte block spans
 * mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 * soff = offset from beginning of the chain to the head of the
 *	current mbuf.
 * scut = last mbuf that contains headers to be retained
 * scutoff = offset to end of the headers in scut
 * s = the current mbuf
 * sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 * d0 = head of chain
 * d = current mbuf
 * dn = current offset in d (next location to store result)
 */
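/*
 * All of the transforms below also handle pointer alignment: a source chunk
 * that is not 4-byte aligned is first copied into a scratch buffer
 * (sp_aligned, at most MAX_REALIGN_LEN bytes) before being passed to the
 * crypto code, and destination mbufs are trimmed with m_adj() so their data
 * pointers start aligned.
 */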


int
esp_cbc_decrypt_aes(struct mbuf *m, size_t off, struct secasvar *sav,
    const struct esp_algorithm *algo, int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int i, len;


	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	/* grab iv */
	m_copydata(m, ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;	// full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;		// 1 block only in sbuf
		}

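		/*
		 * Allocate a fresh destination mbuf when the current one is
		 * full (or none exists yet): a plain mbuf first; if the
		 * remaining payload will not fit in MLEN bytes, MCLGET tries
		 * to attach a cluster and m_mbigget is the fallback for a
		 * larger (big) cluster.
		 */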
		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i)
				d->m_len = i;
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL)
					return ENOMEM;
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

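		/*
		 * CBC chaining: the IV for the next chunk is the last
		 * ciphertext block just consumed, so save it from the source
		 * before advancing (sp still points at the ciphertext here,
		 * possibly in the realign buffer).
		 */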
		// next iv
		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}

	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(iv, sizeof(iv));
	bzero(sbuf, sizeof(sbuf));

	return 0;
}

int
esp_cbc_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *ivp, *dptr, *ivp_unaligned;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	/* put iv into the packet */
	m_copyback(m, ivoff, ivlen, sav->iv);
	ivp = (u_int8_t *) sav->iv;

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN;	// full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN;		// 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i)
				d->m_len = i;
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL)
					return ENOMEM;
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(ivp)) {
			ivp_unaligned = NULL;
		} else {
			ivp_unaligned = ivp;
			ivp = ivp_aligned_buf;
			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
		}
		// no need to check output pointer alignment
		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
		    (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}
		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
			ivp = ivp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

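		/*
		 * CBC chaining: the IV for the next chunk is the last
		 * ciphertext block just produced, which lives in the
		 * destination buffer.
		 */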
		/* next iv */
		ivp = dptr + dn - AES_BLOCKLEN;	// last block encrypted

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(sbuf, sizeof(sbuf));
	key_sa_stir_iv(sav);

	return 0;
}

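/*
 * The GCM key schedule is an aes_gcm_ctx embedded in sav->sched. schedlen
 * reserves ESP_GCM_ALIGN extra bytes so the context can be realigned with
 * P2ROUNDUP, since sav->sched itself is not guaranteed to be 16-byte aligned.
 */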
int
esp_gcm_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return (sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN);
}

int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{
	lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	int rc;

	ctx->decrypt = &ctx->ctxt[0];
	ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return (rc);
	}

	rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->encrypt);
	if (rc) {
		return (rc);
	}
	return (rc);
}

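/*
 * Finalization: once all AAD and payload have been processed, these routines
 * produce the GCM tag (ICV) of tag_bytes bytes for the encrypt and decrypt
 * contexts respectively.
 */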
int
esp_gcm_encrypt_finalize(struct secasvar *sav,
	unsigned char *tag, unsigned int tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	return (aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt));
}

int
esp_gcm_decrypt_finalize(struct secasvar *sav,
	unsigned char *tag, unsigned int tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	return (aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt));
}

int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	m_copyback(m, ivoff, ivlen, sav->iv);

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

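	/*
	 * RFC 4106 nonce = 4-byte salt (the trailing ESP_GCM_SALT_LEN bytes
	 * of key_enc, never sent on the wire) followed by the 8-byte explicit
	 * IV carried in the packet.
	 */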
	/* Set IV */
	memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen);

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	if (aes_encrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
		m_freem(m);
		bzero(nonce, sizeof(nonce));
		return EINVAL;
	}
	bzero(nonce, sizeof(nonce));

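	/*
	 * For new-format ESP the SPI and sequence number (struct newesp) are
	 * authenticated but not encrypted, so they are fed to GCM as AAD.
	 */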
	/* Set Additional Authentication Data */
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
		if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: packet encryption AAD failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination */
		if (!d || (dn + len > d->m_len)) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);

			if (d->m_len > i)
				d->m_len = i;

			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL)
					return ENOMEM;
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}

		if (aes_encrypt_gcm(sp, len, dptr+dn, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* generate new iv */
	key_sa_stir_iv(sav);

	return 0;
}

int
esp_gcm_decrypt_aes(struct mbuf *m, size_t off, struct secasvar *sav,
    const struct esp_algorithm *algo __unused, int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	/* grab iv */
	m_copydata(m, ivoff, ivlen, (caddr_t) iv);

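	/*
	 * Rebuild the RFC 4106 nonce: the locally configured 4-byte salt
	 * (tail of key_enc) followed by the explicit IV taken from the packet.
	 */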
	/* Set IV */
	memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce+ESP_GCM_SALT_LEN, iv, ivlen);

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
		m_freem(m);
		bzero(nonce, sizeof(nonce));
		return EINVAL;
	}
	bzero(nonce, sizeof(nonce));

	/* Set Additional Authentication Data */
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
		if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
			m_freem(m);	/* free the chain on failure, as the other error paths do */
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0)
		s = s->m_next;

	while (soff < m->m_pkthdr.len) {
		/* source */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination */
		if (!d || (dn + len > d->m_len)) {
			if (d)
				dp = d;
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0)
					m_freem(d0);
				return ENOBUFS;
			}
			if (!d0)
				d0 = d;
			if (dp)
				dp->m_next = d;

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);

			if (d->m_len > i)
				d->m_len = i;

			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn)
			len = d->m_len - dn;

		/* Decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on a 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL)
					return ENOMEM;
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment

		if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(iv, sizeof(iv));

	return 0;
}