/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>

#include <kern/locks.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>

#include <libkern/crypto/aes.h>

#include <netkey/key.h>

#include <net/net_osdep.h>

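/* Upper bound on the temporary bounce buffer used to re-align crypto input */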
#define MAX_REALIGN_LEN 2000
#define AES_BLOCKLEN 16
#define ESP_GCM_SALT_LEN 4      // RFC 4106 Section 4
#define ESP_GCM_IVLEN 8
#define ESP_GCM_ALIGN 16

extern lck_mtx_t *sadb_mutex;

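/*
 * GCM context layout: the decrypt and encrypt corecrypto contexts are
 * carved out of the variable-length storage that follows the two
 * pointers (ctxt[] is a zero-length trailing array).
 */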
typedef struct {
    ccgcm_ctx *decrypt;
    ccgcm_ctx *encrypt;
    ccgcm_ctx ctxt[0];
} aes_gcm_ctx;

size_t
esp_aes_schedlen(
    __unused const struct esp_algorithm *algo)
{
    return sizeof(aes_ctx);
}

int
esp_aes_schedule(
    __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
    LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
    aes_ctx *ctx = (aes_ctx*)sav->sched;

    aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
    aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

    return 0;
}


/* The following two functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can, based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf. The crypto code requires data to be a multiple
 * of 16 bytes. A separate buffer is used when a 16-byte block spans
 * mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 * soff = offset from beginning of the chain to the head of the
 *        current mbuf.
 * scut = last mbuf that contains headers to be retained
 * scutoff = offset to end of the headers in scut
 * s = the current mbuf
 * sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 * d0 = head of chain
 * d = current mbuf
 * dn = current offset in d (next location to store result)
 */
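
/*
 * Both functions also re-align their input when needed: if a source
 * chunk is not 4-byte aligned, it is first copied into an aligned
 * bounce buffer (at most MAX_REALIGN_LEN bytes) before being handed
 * to the AES routines.
 */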


int
esp_cbc_decrypt_aes(
    struct mbuf *m,
    size_t off,
    struct secasvar *sav,
    const struct esp_algorithm *algo,
    int ivlen)
{
    struct mbuf *s;
    struct mbuf *d, *d0, *dp;
    int soff;       /* offset from the head of chain, to head of this mbuf */
    int sn, dn;     /* offset from the head of the mbuf, to meat */
    size_t ivoff, bodyoff;
    u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
    u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
    struct mbuf *scut;
    int scutoff;
    int i, len;


    if (ivlen != AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
            "unsupported ivlen %d\n", algo->name, ivlen));
        m_freem(m);
        return EINVAL;
    }

    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        ivoff = off + sizeof(struct esp);
        bodyoff = off + sizeof(struct esp) + ivlen;
    } else {
        ivoff = off + sizeof(struct newesp);
        bodyoff = off + sizeof(struct newesp) + ivlen;
    }

    if (m->m_pkthdr.len < bodyoff) {
        ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
            algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
        m_freem(m);
        return EINVAL;
    }
    if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
            "payload length must be multiple of %d\n",
            algo->name, AES_BLOCKLEN));
        m_freem(m);
        return EINVAL;
    }

    VERIFY(ivoff <= INT_MAX);

    /* grab iv */
    m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

    s = m;
    soff = sn = dn = 0;
    d = d0 = dp = NULL;
    sp = dptr = NULL;

    /* skip header/IV offset */
    while (soff < bodyoff) {
        if (soff + s->m_len > bodyoff) {
            sn = (int)(bodyoff - soff);
            break;
        }

        soff += s->m_len;
        s = s->m_next;
    }
    scut = s;
    scutoff = sn;

    /* skip over empty mbuf */
    while (s && s->m_len == 0) {
        s = s->m_next;
    }

    while (soff < m->m_pkthdr.len) {
        /* source */
        if (sn + AES_BLOCKLEN <= s->m_len) {
            /* body is continuous */
            sp = mtod(s, u_int8_t *) + sn;
            len = s->m_len - sn;
            len -= len % AES_BLOCKLEN;      // full blocks only
        } else {
            /* body is non-continuous */
            m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
            sp = sbuf;
            len = AES_BLOCKLEN;             // 1 block only in sbuf
        }

        /* destination */
        if (!d || dn + AES_BLOCKLEN > d->m_len) {
            if (d) {
                dp = d;
            }
            MGET(d, M_DONTWAIT, MT_DATA);
            i = m->m_pkthdr.len - (soff + sn);
            if (d && i > MLEN) {
                MCLGET(d, M_DONTWAIT);
                if ((d->m_flags & M_EXT) == 0) {
                    d = m_mbigget(d, M_DONTWAIT);
                    if ((d->m_flags & M_EXT) == 0) {
                        m_free(d);
                        d = NULL;
                    }
                }
            }
            if (!d) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                return ENOBUFS;
            }
            if (!d0) {
                d0 = d;
            }
            if (dp) {
                dp->m_next = d;
            }

            // try to make mbuf data aligned
            if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
                m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
            }

            d->m_len = (int)M_TRAILINGSPACE(d);
            d->m_len -= d->m_len % AES_BLOCKLEN;
            if (d->m_len > i) {
                d->m_len = i;
            }
            dptr = mtod(d, u_int8_t *);
            dn = 0;
        }

        /* adjust len if greater than space available in dest */
        if (len > d->m_len - dn) {
            len = d->m_len - dn;
        }

        /* decrypt */
        // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(sp)) {
            sp_unaligned = NULL;
        } else {
            sp_unaligned = sp;
            if (len > MAX_REALIGN_LEN) {
                m_freem(m);
                if (d0 != NULL) {
                    m_freem(d0);
                }
                if (sp_aligned != NULL) {
                    FREE(sp_aligned, M_SECA);
                    sp_aligned = NULL;
                }
                return ENOBUFS;
            }
            if (sp_aligned == NULL) {
                sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
                if (sp_aligned == NULL) {
                    m_freem(m);
                    if (d0 != NULL) {
                        m_freem(d0);
                    }
                    return ENOMEM;
                }
            }
            sp = sp_aligned;
            memcpy(sp, sp_unaligned, len);
        }
        // no need to check output pointer alignment
        aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
            (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));

        // update unaligned pointers
        if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
            sp = sp_unaligned;
        }

        /* update offsets */
        sn += len;
        dn += len;

        // next iv: the last ciphertext block just processed becomes the IV for the next chunk
        bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);

        /* find the next source block */
        while (s && sn >= s->m_len) {
            sn -= s->m_len;
            soff += s->m_len;
            s = s->m_next;
        }
    }

    /* free un-needed source mbufs and add dest mbufs to chain */
    m_freem(scut->m_next);
    scut->m_len = scutoff;
    scut->m_next = d0;

    // free memory
    if (sp_aligned != NULL) {
        FREE(sp_aligned, M_SECA);
        sp_aligned = NULL;
    }

    /* just in case */
    bzero(iv, sizeof(iv));
    bzero(sbuf, sizeof(sbuf));

    return 0;
}

int
esp_cbc_encrypt_aes(
    struct mbuf *m,
    size_t off,
    __unused size_t plen,
    struct secasvar *sav,
    const struct esp_algorithm *algo,
    int ivlen)
{
    struct mbuf *s;
    struct mbuf *d, *d0, *dp;
    int soff;       /* offset from the head of chain, to head of this mbuf */
    int sn, dn;     /* offset from the head of the mbuf, to meat */
    size_t ivoff, bodyoff;
    u_int8_t *ivp, *dptr, *ivp_unaligned;
    u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
    u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
    struct mbuf *scut;
    int scutoff;
    int i, len;

    if (ivlen != AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
            "unsupported ivlen %d\n", algo->name, ivlen));
        m_freem(m);
        return EINVAL;
    }

    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        ivoff = off + sizeof(struct esp);
        bodyoff = off + sizeof(struct esp) + ivlen;
    } else {
        ivoff = off + sizeof(struct newesp);
        bodyoff = off + sizeof(struct newesp) + ivlen;
    }

    VERIFY(ivoff <= INT_MAX);

    /* put iv into the packet */
    m_copyback(m, (int)ivoff, ivlen, sav->iv);
    ivp = (u_int8_t *) sav->iv;

    if (m->m_pkthdr.len < bodyoff) {
        ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
            algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
        m_freem(m);
        return EINVAL;
    }
    if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
            "payload length must be multiple of %d\n",
            algo->name, AES_BLOCKLEN));
        m_freem(m);
        return EINVAL;
    }

    s = m;
    soff = sn = dn = 0;
    d = d0 = dp = NULL;
    sp = dptr = NULL;

    /* skip headers/IV */
    while (soff < bodyoff) {
        if (soff + s->m_len > bodyoff) {
            sn = (int)(bodyoff - soff);
            break;
        }

        soff += s->m_len;
        s = s->m_next;
    }
    scut = s;
    scutoff = sn;

    /* skip over empty mbuf */
    while (s && s->m_len == 0) {
        s = s->m_next;
    }

    while (soff < m->m_pkthdr.len) {
        /* source */
        if (sn + AES_BLOCKLEN <= s->m_len) {
            /* body is continuous */
            sp = mtod(s, u_int8_t *) + sn;
            len = s->m_len - sn;
            len -= len % AES_BLOCKLEN;      // full blocks only
        } else {
            /* body is non-continuous */
            m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
            sp = sbuf;
            len = AES_BLOCKLEN;             // 1 block only in sbuf
        }

        /* destination */
        if (!d || dn + AES_BLOCKLEN > d->m_len) {
            if (d) {
                dp = d;
            }
            MGET(d, M_DONTWAIT, MT_DATA);
            i = m->m_pkthdr.len - (soff + sn);
            if (d && i > MLEN) {
                MCLGET(d, M_DONTWAIT);
                if ((d->m_flags & M_EXT) == 0) {
                    d = m_mbigget(d, M_DONTWAIT);
                    if ((d->m_flags & M_EXT) == 0) {
                        m_free(d);
                        d = NULL;
                    }
                }
            }
            if (!d) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                return ENOBUFS;
            }
            if (!d0) {
                d0 = d;
            }
            if (dp) {
                dp->m_next = d;
            }

            // try to make mbuf data aligned
            if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
                m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
            }

            d->m_len = (int)M_TRAILINGSPACE(d);
            d->m_len -= d->m_len % AES_BLOCKLEN;
            if (d->m_len > i) {
                d->m_len = i;
            }
            dptr = mtod(d, u_int8_t *);
            dn = 0;
        }

        /* adjust len if greater than space available */
        if (len > d->m_len - dn) {
            len = d->m_len - dn;
        }

        /* encrypt */
        // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(sp)) {
            sp_unaligned = NULL;
        } else {
            sp_unaligned = sp;
            if (len > MAX_REALIGN_LEN) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                if (sp_aligned != NULL) {
                    FREE(sp_aligned, M_SECA);
                    sp_aligned = NULL;
                }
                return ENOBUFS;
            }
            if (sp_aligned == NULL) {
                sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
                if (sp_aligned == NULL) {
                    m_freem(m);
                    if (d0) {
                        m_freem(d0);
                    }
                    return ENOMEM;
                }
            }
            sp = sp_aligned;
            memcpy(sp, sp_unaligned, len);
        }
        // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(ivp)) {
            ivp_unaligned = NULL;
        } else {
            ivp_unaligned = ivp;
            ivp = ivp_aligned_buf;
            memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
        }
        // no need to check output pointer alignment
        aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
            (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));

        // update unaligned pointers
        if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
            sp = sp_unaligned;
        }
        if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
            ivp = ivp_unaligned;
        }

        /* update offsets */
        sn += len;
        dn += len;

        /* next iv */
        ivp = dptr + dn - AES_BLOCKLEN;     // last block encrypted

        /* find the next source block and skip empty mbufs */
        while (s && sn >= s->m_len) {
            sn -= s->m_len;
            soff += s->m_len;
            s = s->m_next;
        }
    }

    /* free un-needed source mbufs and add dest mbufs to chain */
    m_freem(scut->m_next);
    scut->m_len = scutoff;
    scut->m_next = d0;

    // free memory
    if (sp_aligned != NULL) {
        FREE(sp_aligned, M_SECA);
        sp_aligned = NULL;
    }

    /* just in case */
    bzero(sbuf, sizeof(sbuf));
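    /* stir (randomize) sav->iv so the next packet does not reuse this IV */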
    key_sa_stir_iv(sav);

    return 0;
}

size_t
esp_gcm_schedlen(
    __unused const struct esp_algorithm *algo)
{
    return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
}

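/*
 * sav->sched is not guaranteed to be 16-byte aligned; schedlen reserves
 * ESP_GCM_ALIGN extra bytes so the context can be rounded up to an
 * aligned address with P2ROUNDUP.
 */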
int
esp_gcm_schedule(__unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
    LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
    aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
    u_int ivlen = sav->ivlen;
    unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
    int rc;

    ctx->decrypt = &ctx->ctxt[0];
    ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

    rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
    if (rc) {
        return rc;
    }

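    /*
     * Per RFC 4106, the GCM nonce is the 4-byte salt carried at the
     * tail of key_enc followed by the 8-byte explicit IV.
     */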
    bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
    memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
    memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);

    rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
    if (rc) {
        return rc;
    }

    rc = aes_encrypt_reset_gcm(ctx->encrypt);
    if (rc) {
        return rc;
    }

    return rc;
}

int
esp_gcm_encrypt_finalize(struct secasvar *sav,
    unsigned char *tag, size_t tag_bytes)
{
    aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
    return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
}

int
esp_gcm_decrypt_finalize(struct secasvar *sav,
    unsigned char *tag, size_t tag_bytes)
{
    aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
    return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
}

int
esp_gcm_encrypt_aes(
    struct mbuf *m,
    size_t off,
    __unused size_t plen,
    struct secasvar *sav,
    const struct esp_algorithm *algo __unused,
    int ivlen)
{
    struct mbuf *s;
    struct mbuf *d, *d0, *dp;
    int soff;       /* offset from the head of chain, to head of this mbuf */
    int sn, dn;     /* offset from the head of the mbuf, to meat */
    size_t ivoff, bodyoff;
    u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
    aes_gcm_ctx *ctx;
    struct mbuf *scut;
    int scutoff;
    int i, len;
    unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];

    if (ivlen != ESP_GCM_IVLEN) {
        ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
        m_freem(m);
        return EINVAL;
    }

    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        ivoff = off + sizeof(struct esp);
        bodyoff = off + sizeof(struct esp) + ivlen;
    } else {
        ivoff = off + sizeof(struct newesp);
        bodyoff = off + sizeof(struct newesp) + ivlen;
    }

    bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
    /* generate new iv */
    ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);

    if (aes_encrypt_reset_gcm(ctx->encrypt)) {
        ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
        m_freem(m);
        return EINVAL;
    }

    if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
        ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
        m_freem(m);
        return EINVAL;
    }

    VERIFY(ivoff <= INT_MAX);

    /*
     * The IV is now generated within corecrypto and is provided to
     * ESP using aes_encrypt_inc_iv_gcm(). This makes sav->iv redundant:
     * it is no longer used in GCM operations. We still copy the IV
     * back to sav->iv so that any future code reading this value
     * gets the latest IV.
     */
    memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
    m_copyback(m, (int)ivoff, ivlen, sav->iv);
    bzero(nonce, ESP_GCM_SALT_LEN + ivlen);

    if (m->m_pkthdr.len < bodyoff) {
        ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
            m->m_pkthdr.len, (u_int32_t)bodyoff));
        m_freem(m);
        return EINVAL;
    }

    VERIFY(off <= INT_MAX);

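    /*
     * Per RFC 4106, the AAD for ESP without extended sequence numbers
     * is the ESP header itself: the SPI and sequence number (struct newesp).
     */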
    /* Set Additional Authentication Data */
    if (!(sav->flags & SADB_X_EXT_OLD)) {
        struct newesp esp;
        m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
        if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
            ipseclog((LOG_ERR, "%s: packet encryption AAD failure\n", __FUNCTION__));
            m_freem(m);
            return EINVAL;
        }
    }

    s = m;
    soff = sn = dn = 0;
    d = d0 = dp = NULL;
    sp = dptr = NULL;

    /* skip headers/IV */
    while (soff < bodyoff) {
        if (soff + s->m_len > bodyoff) {
            sn = (int)(bodyoff - soff);
            break;
        }

        soff += s->m_len;
        s = s->m_next;
    }
    scut = s;
    scutoff = sn;

    /* skip over empty mbuf */
    while (s && s->m_len == 0) {
        s = s->m_next;
    }

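    /*
     * Unlike the CBC path, the GCM path accepts arbitrary chunk lengths,
     * so source and destination chunks are not rounded down to
     * AES_BLOCKLEN multiples here.
     */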
    while (soff < m->m_pkthdr.len) {
        /* source */
        sp = mtod(s, u_int8_t *) + sn;
        len = s->m_len - sn;

        /* destination */
        if (!d || (dn + len > d->m_len)) {
            if (d) {
                dp = d;
            }
            MGET(d, M_DONTWAIT, MT_DATA);
            i = m->m_pkthdr.len - (soff + sn);
            if (d && i > MLEN) {
                MCLGET(d, M_DONTWAIT);
                if ((d->m_flags & M_EXT) == 0) {
                    d = m_mbigget(d, M_DONTWAIT);
                    if ((d->m_flags & M_EXT) == 0) {
                        m_free(d);
                        d = NULL;
                    }
                }
            }
            if (!d) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                return ENOBUFS;
            }
            if (!d0) {
                d0 = d;
            }
            if (dp) {
                dp->m_next = d;
            }

            // try to make mbuf data aligned
            if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
                m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
            }

            d->m_len = (int)M_TRAILINGSPACE(d);

            if (d->m_len > i) {
                d->m_len = i;
            }

            dptr = mtod(d, u_int8_t *);
            dn = 0;
        }

        /* adjust len if greater than space available */
        if (len > d->m_len - dn) {
            len = d->m_len - dn;
        }

        /* encrypt */
        // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(sp)) {
            sp_unaligned = NULL;
        } else {
            sp_unaligned = sp;
            if (len > MAX_REALIGN_LEN) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                if (sp_aligned != NULL) {
                    FREE(sp_aligned, M_SECA);
                    sp_aligned = NULL;
                }
                return ENOBUFS;
            }
            if (sp_aligned == NULL) {
                sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
                if (sp_aligned == NULL) {
                    m_freem(m);
                    if (d0) {
                        m_freem(d0);
                    }
                    return ENOMEM;
                }
            }
            sp = sp_aligned;
            memcpy(sp, sp_unaligned, len);
        }

        if (aes_encrypt_gcm(sp, len, dptr + dn, ctx->encrypt)) {
            ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
            m_freem(m);
            return EINVAL;
        }

        // update unaligned pointers
        if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
            sp = sp_unaligned;
        }

        /* update offsets */
        sn += len;
        dn += len;

        /* find the next source block and skip empty mbufs */
        while (s && sn >= s->m_len) {
            sn -= s->m_len;
            soff += s->m_len;
            s = s->m_next;
        }
    }

    /* free un-needed source mbufs and add dest mbufs to chain */
    m_freem(scut->m_next);
    scut->m_len = scutoff;
    scut->m_next = d0;

    // free memory
    if (sp_aligned != NULL) {
        FREE(sp_aligned, M_SECA);
        sp_aligned = NULL;
    }

    return 0;
}

int
esp_gcm_decrypt_aes(
    struct mbuf *m,
    size_t off,
    struct secasvar *sav,
    const struct esp_algorithm *algo __unused,
    int ivlen)
{
    struct mbuf *s;
    struct mbuf *d, *d0, *dp;
    int soff;       /* offset from the head of chain, to head of this mbuf */
    int sn, dn;     /* offset from the head of the mbuf, to meat */
    size_t ivoff, bodyoff;
    u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr;
    u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
    aes_gcm_ctx *ctx;
    struct mbuf *scut;
    int scutoff;
    int i, len;
    unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];

    if (ivlen != ESP_GCM_IVLEN) {
        ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
        m_freem(m);
        return EINVAL;
    }

    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        ivoff = off + sizeof(struct esp);
        bodyoff = off + sizeof(struct esp) + ivlen;
    } else {
        ivoff = off + sizeof(struct newesp);
        bodyoff = off + sizeof(struct newesp) + ivlen;
    }

    if (m->m_pkthdr.len < bodyoff) {
        ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
            m->m_pkthdr.len, (u_int32_t)bodyoff));
        m_freem(m);
        return EINVAL;
    }

    VERIFY(ivoff <= INT_MAX);

    /* grab iv */
    m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

    /* Set IV */
    memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
    memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);

    ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
    if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
        ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
        m_freem(m);
        bzero(nonce, sizeof(nonce));
        return EINVAL;
    }
    bzero(nonce, sizeof(nonce));

    VERIFY(off <= INT_MAX);

    /* Set Additional Authentication Data */
    if (!(sav->flags & SADB_X_EXT_OLD)) {
        struct newesp esp;
        m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
        if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
            ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
            m_freem(m);     /* free m here, as every other error path in this function does */
            return EINVAL;
        }
    }

    s = m;
    soff = sn = dn = 0;
    d = d0 = dp = NULL;
    sp = dptr = NULL;

    /* skip header/IV offset */
    while (soff < bodyoff) {
        if (soff + s->m_len > bodyoff) {
            sn = (int)(bodyoff - soff);
            break;
        }

        soff += s->m_len;
        s = s->m_next;
    }
    scut = s;
    scutoff = sn;

    /* skip over empty mbuf */
    while (s && s->m_len == 0) {
        s = s->m_next;
    }

    while (soff < m->m_pkthdr.len) {
        /* source */
        sp = mtod(s, u_int8_t *) + sn;
        len = s->m_len - sn;

        /* destination */
        if (!d || (dn + len > d->m_len)) {
            if (d) {
                dp = d;
            }
            MGET(d, M_DONTWAIT, MT_DATA);
            i = m->m_pkthdr.len - (soff + sn);
            if (d && i > MLEN) {
                MCLGET(d, M_DONTWAIT);
                if ((d->m_flags & M_EXT) == 0) {
                    d = m_mbigget(d, M_DONTWAIT);
                    if ((d->m_flags & M_EXT) == 0) {
                        m_free(d);
                        d = NULL;
                    }
                }
            }
            if (!d) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                return ENOBUFS;
            }
            if (!d0) {
                d0 = d;
            }
            if (dp) {
                dp->m_next = d;
            }

            // try to make mbuf data aligned
            if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
                m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
            }

            d->m_len = (int)M_TRAILINGSPACE(d);

            if (d->m_len > i) {
                d->m_len = i;
            }

            dptr = mtod(d, u_int8_t *);
            dn = 0;
        }

        /* adjust len if greater than space available in dest */
        if (len > d->m_len - dn) {
            len = d->m_len - dn;
        }

        /* Decrypt */
        // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(sp)) {
            sp_unaligned = NULL;
        } else {
            sp_unaligned = sp;
            if (len > MAX_REALIGN_LEN) {
                m_freem(m);
                if (d0) {
                    m_freem(d0);
                }
                if (sp_aligned != NULL) {
                    FREE(sp_aligned, M_SECA);
                    sp_aligned = NULL;
                }
                return ENOBUFS;
            }
            if (sp_aligned == NULL) {
                sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
                if (sp_aligned == NULL) {
                    m_freem(m);
                    if (d0) {
                        m_freem(d0);
                    }
                    return ENOMEM;
                }
            }
            sp = sp_aligned;
            memcpy(sp, sp_unaligned, len);
        }
        // no need to check output pointer alignment

        if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
            ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
            m_freem(m);
            return EINVAL;
        }

        // update unaligned pointers
        if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
            sp = sp_unaligned;
        }

        /* update offsets */
        sn += len;
        dn += len;

        /* find the next source block */
        while (s && sn >= s->m_len) {
            sn -= s->m_len;
            soff += s->m_len;
            s = s->m_next;
        }
    }

    /* free un-needed source mbufs and add dest mbufs to chain */
    m_freem(scut->m_next);
    scut->m_len = scutoff;
    scut->m_next = d0;

    // free memory
    if (sp_aligned != NULL) {
        FREE(sp_aligned, M_SECA);
        sp_aligned = NULL;
    }

    /* just in case */
    bzero(iv, sizeof(iv));

    return 0;
}