]> git.saurik.com Git - apple/xnu.git/blame - bsd/netinet6/esp_rijndael.c
xnu-6153.11.26.tar.gz
[apple/xnu.git] / bsd / netinet6 / esp_rijndael.c
CommitLineData
b0d623f7 1/*
39037602 2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
b0d623f7
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
39037602 5 *
b0d623f7
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
39037602 14 *
b0d623f7
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
39037602 17 *
b0d623f7
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
39037602 25 *
b0d623f7
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
9bccf70c
A
29/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
30/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/socket.h>
64#include <sys/queue.h>
91447636
A
65#include <sys/syslog.h>
66#include <sys/mbuf.h>
316670eb 67#include <sys/mcache.h>
91447636
A
68
69#include <kern/locks.h>
9bccf70c
A
70
71#include <net/if.h>
72#include <net/route.h>
73
74#include <netinet6/ipsec.h>
75#include <netinet6/esp.h>
76#include <netinet6/esp_rijndael.h>
77
316670eb 78#include <libkern/crypto/aes.h>
9bccf70c 79
2d21ac55
A
80#include <netkey/key.h>
81
9bccf70c
A
82#include <net/net_osdep.h>
83
15129b1c 84#define MAX_REALIGN_LEN 2000
91447636 85#define AES_BLOCKLEN 16
3e170ce0
A
86#define ESP_GCM_SALT_LEN 4 // RFC 4106 Section 4
87#define ESP_GCM_IVLEN 8
88#define ESP_GCM_ALIGN 16
91447636
A
89
90extern lck_mtx_t *sadb_mutex;
91
/*
 * Per-SA AES-GCM scheduling state, carved out of sav->sched by
 * esp_gcm_schedule().  The decrypt/encrypt pointers index into the
 * variable-length ctxt[] area that follows this header: the decrypt
 * context lives at ctxt[0] and the encrypt context starts right after
 * it (offset aes_decrypt_get_ctx_size_gcm() bytes in).  The total
 * allocation size is reported by esp_gcm_schedlen().
 */
typedef struct {
	ccgcm_ctx *decrypt;
	ccgcm_ctx *encrypt;
	ccgcm_ctx ctxt[0];	/* zero-length trailer holding both contexts */
} aes_gcm_ctx;
97
9bccf70c 98int
2d21ac55
A
99esp_aes_schedlen(
100 __unused const struct esp_algorithm *algo)
9bccf70c 101{
91447636 102 return sizeof(aes_ctx);
9bccf70c
A
103}
104
105int
2d21ac55
A
106esp_aes_schedule(
107 __unused const struct esp_algorithm *algo,
108 struct secasvar *sav)
9bccf70c 109{
5ba3f43e 110 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
91447636 111 aes_ctx *ctx = (aes_ctx*)sav->sched;
0a7de745 112
b0d623f7
A
113 aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
114 aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
0a7de745 115
9bccf70c
A
116 return 0;
117}
118
91447636
A
119
120/* The following 2 functions decrypt or encrypt the contents of
121 * the mbuf chain passed in keeping the IP and ESP header's in place,
122 * along with the IV.
123 * The code attempts to call the crypto code with the largest chunk
124 * of data it can based on the amount of source data in
125 * the current source mbuf and the space remaining in the current
126 * destination mbuf. The crypto code requires data to be a multiples
127 * of 16 bytes. A separate buffer is used when a 16 byte block spans
128 * mbufs.
129 *
130 * m = mbuf chain
131 * off = offset to ESP header
0a7de745 132 *
91447636
A
133 * local vars for source:
134 * soff = offset from beginning of the chain to the head of the
135 * current mbuf.
136 * scut = last mbuf that contains headers to be retained
137 * scutoff = offset to end of the headers in scut
138 * s = the current mbuf
139 * sn = current offset to data in s (next source data to process)
140 *
141 * local vars for dest:
142 * d0 = head of chain
143 * d = current mbuf
144 * dn = current offset in d (next location to store result)
145 */
0a7de745
A
146
147
/*
 * Decrypt the AES-CBC-encrypted ESP payload of mbuf chain m in place
 * (logically): the IP/ESP headers and IV are retained, the encrypted
 * body is decrypted into a freshly allocated mbuf chain which replaces
 * the original payload mbufs.  Processes the largest run of whole
 * 16-byte blocks available in the current source/destination mbuf pair;
 * a block that straddles mbufs is staged through sbuf.
 *
 * m     = mbuf chain (freed on error)
 * off   = offset to the ESP header
 * ivlen = must be AES_BLOCKLEN (16)
 *
 * Returns 0 on success, EINVAL/ENOBUFS/ENOMEM on failure (m consumed).
 */
int
esp_cbc_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int i, len;


	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	/* grab iv */
	m_copydata(m, ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;	/* last source mbuf whose (header) bytes are kept */
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN; // full blocks only
		} else {
			/* body is non-continuous: stage one block through sbuf */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN; // 1 block only in sbuf
		}

		/* destination: allocate a new mbuf when the current one is full */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);	/* bytes still to decrypt */
			if (d && i > MLEN) {
				/* try a 2K cluster, then a 4K "big" cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			/* usable length: trailing space rounded down to whole blocks */
			d->m_len = M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			/* bounce buffer is allocated once and reused across iterations */
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		// next iv: CBC chains from the last *ciphertext* block just consumed
		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case: scrub IV and staged ciphertext off the stack */
	bzero(iv, sizeof(iv));
	bzero(sbuf, sizeof(sbuf));

	return 0;
}
359
360int
2d21ac55
A
361esp_cbc_encrypt_aes(
362 struct mbuf *m,
363 size_t off,
364 __unused size_t plen,
365 struct secasvar *sav,
366 const struct esp_algorithm *algo,
367 int ivlen)
9bccf70c 368{
91447636
A
369 struct mbuf *s;
370 struct mbuf *d, *d0, *dp;
0a7de745
A
371 int soff; /* offset from the head of chain, to head of this mbuf */
372 int sn, dn; /* offset from the head of the mbuf, to meat */
91447636 373 size_t ivoff, bodyoff;
316670eb 374 u_int8_t *ivp, *dptr, *ivp_unaligned;
15129b1c 375 u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
316670eb 376 u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
91447636
A
377 struct mbuf *scut;
378 int scutoff;
379 int i, len;
380
381 if (ivlen != AES_BLOCKLEN) {
382 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
383 "unsupported ivlen %d\n", algo->name, ivlen));
384 m_freem(m);
385 return EINVAL;
386 }
387
388 if (sav->flags & SADB_X_EXT_OLD) {
389 /* RFC 1827 */
390 ivoff = off + sizeof(struct esp);
391 bodyoff = off + sizeof(struct esp) + ivlen;
392 } else {
393 ivoff = off + sizeof(struct newesp);
394 bodyoff = off + sizeof(struct newesp) + ivlen;
395 }
396
397 /* put iv into the packet */
398 m_copyback(m, ivoff, ivlen, sav->iv);
b0d623f7 399 ivp = (u_int8_t *) sav->iv;
91447636
A
400
401 if (m->m_pkthdr.len < bodyoff) {
402 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
b0d623f7 403 algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
91447636
A
404 m_freem(m);
405 return EINVAL;
406 }
407 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
408 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
409 "payload length must be multiple of %lu\n",
410 algo->name, AES_BLOCKLEN));
411 m_freem(m);
412 return EINVAL;
413 }
91447636
A
414
415 s = m;
416 soff = sn = dn = 0;
417 d = d0 = dp = NULL;
418 sp = dptr = NULL;
0a7de745 419
91447636
A
420 /* skip headers/IV */
421 while (soff < bodyoff) {
422 if (soff + s->m_len > bodyoff) {
423 sn = bodyoff - soff;
424 break;
425 }
426
427 soff += s->m_len;
428 s = s->m_next;
429 }
430 scut = s;
431 scutoff = sn;
432
433 /* skip over empty mbuf */
0a7de745 434 while (s && s->m_len == 0) {
91447636 435 s = s->m_next;
0a7de745
A
436 }
437
91447636
A
438 while (soff < m->m_pkthdr.len) {
439 /* source */
440 if (sn + AES_BLOCKLEN <= s->m_len) {
441 /* body is continuous */
442 sp = mtod(s, u_int8_t *) + sn;
443 len = s->m_len - sn;
0a7de745 444 len -= len % AES_BLOCKLEN; // full blocks only
91447636
A
445 } else {
446 /* body is non-continuous */
b0d623f7 447 m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
91447636 448 sp = sbuf;
0a7de745 449 len = AES_BLOCKLEN; // 1 block only in sbuf
91447636
A
450 }
451
452 /* destination */
453 if (!d || dn + AES_BLOCKLEN > d->m_len) {
0a7de745 454 if (d) {
91447636 455 dp = d;
0a7de745 456 }
91447636
A
457 MGET(d, M_DONTWAIT, MT_DATA);
458 i = m->m_pkthdr.len - (soff + sn);
459 if (d && i > MLEN) {
460 MCLGET(d, M_DONTWAIT);
461 if ((d->m_flags & M_EXT) == 0) {
2d21ac55
A
462 d = m_mbigget(d, M_DONTWAIT);
463 if ((d->m_flags & M_EXT) == 0) {
464 m_free(d);
465 d = NULL;
466 }
91447636
A
467 }
468 }
469 if (!d) {
470 m_freem(m);
0a7de745 471 if (d0) {
91447636 472 m_freem(d0);
0a7de745 473 }
91447636
A
474 return ENOBUFS;
475 }
0a7de745 476 if (!d0) {
91447636 477 d0 = d;
0a7de745
A
478 }
479 if (dp) {
91447636 480 dp->m_next = d;
0a7de745 481 }
91447636 482
316670eb
A
483 // try to make mbuf data aligned
484 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
485 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
486 }
487
91447636
A
488 d->m_len = M_TRAILINGSPACE(d);
489 d->m_len -= d->m_len % AES_BLOCKLEN;
0a7de745 490 if (d->m_len > i) {
91447636 491 d->m_len = i;
0a7de745 492 }
91447636
A
493 dptr = mtod(d, u_int8_t *);
494 dn = 0;
495 }
0a7de745 496
91447636 497 /* adjust len if greater than space available */
0a7de745 498 if (len > d->m_len - dn) {
91447636 499 len = d->m_len - dn;
0a7de745
A
500 }
501
91447636 502 /* encrypt */
316670eb
A
503 // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
504 if (IPSEC_IS_P2ALIGNED(sp)) {
505 sp_unaligned = NULL;
506 } else {
507 sp_unaligned = sp;
15129b1c 508 if (len > MAX_REALIGN_LEN) {
5ba3f43e
A
509 m_freem(m);
510 if (d0) {
511 m_freem(d0);
512 }
513 if (sp_aligned != NULL) {
514 FREE(sp_aligned, M_SECA);
515 sp_aligned = NULL;
516 }
15129b1c
A
517 return ENOBUFS;
518 }
519 if (sp_aligned == NULL) {
520 sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
5ba3f43e
A
521 if (sp_aligned == NULL) {
522 m_freem(m);
523 if (d0) {
524 m_freem(d0);
525 }
15129b1c 526 return ENOMEM;
5ba3f43e 527 }
15129b1c
A
528 }
529 sp = sp_aligned;
316670eb
A
530 memcpy(sp, sp_unaligned, len);
531 }
532 // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
533 if (IPSEC_IS_P2ALIGNED(ivp)) {
534 ivp_unaligned = NULL;
535 } else {
536 ivp_unaligned = ivp;
537 ivp = ivp_aligned_buf;
15129b1c 538 memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
316670eb
A
539 }
540 // no need to check output pointer alignment
0a7de745
A
541 aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
542 (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
91447636 543
316670eb
A
544 // update unaligned pointers
545 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
546 sp = sp_unaligned;
547 }
548 if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
549 ivp = ivp_unaligned;
550 }
551
91447636
A
552 /* update offsets */
553 sn += len;
554 dn += len;
555
556 /* next iv */
0a7de745
A
557 ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted
558
91447636
A
559 /* find the next source block and skip empty mbufs */
560 while (s && sn >= s->m_len) {
561 sn -= s->m_len;
562 soff += s->m_len;
563 s = s->m_next;
564 }
91447636
A
565 }
566
567 /* free un-needed source mbufs and add dest mbufs to chain */
568 m_freem(scut->m_next);
569 scut->m_len = scutoff;
570 scut->m_next = d0;
0a7de745 571
15129b1c
A
572 // free memory
573 if (sp_aligned != NULL) {
574 FREE(sp_aligned, M_SECA);
575 sp_aligned = NULL;
576 }
91447636
A
577
578 /* just in case */
579 bzero(sbuf, sizeof(sbuf));
91447636
A
580 key_sa_stir_iv(sav);
581
9bccf70c
A
582 return 0;
583}
3e170ce0
A
584
585int
586esp_gcm_schedlen(
587 __unused const struct esp_algorithm *algo)
588{
0a7de745 589 return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
3e170ce0
A
590}
591
/*
 * Initialize the per-SA AES-GCM contexts inside sav->sched: set up the
 * decrypt/encrypt context pointers into the trailing ctxt[] area, then
 * expand the key (minus the 4-byte RFC 4106 salt) into both contexts.
 * The encrypt context is seeded with an initial nonce of
 * salt || sav->iv and then reset so the first packet increments from it.
 * Caller must hold sadb_mutex.  Returns 0 on success or the corecrypto
 * error code on failure.
 */
int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	u_int ivlen = sav->ivlen;
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];	/* VLA: salt || IV */
	int rc;

	/* decrypt context first, encrypt context immediately after it */
	ctx->decrypt = &ctx->ctxt[0];
	ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

	/* key material excludes the trailing salt bytes */
	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return rc;
	}

	/* build initial nonce = salt (from end of key blob) || sav->iv */
	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);
	/*
	 * NOTE(review): nonce (which contains the key salt) is not zeroized
	 * before return here, unlike esp_gcm_encrypt/decrypt_aes which
	 * bzero their nonce after use — confirm whether that is intended.
	 */

	rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
	if (rc) {
		return rc;
	}

	/* reset so the first packet's aes_encrypt_inc_iv_gcm starts fresh */
	rc = aes_encrypt_reset_gcm(ctx->encrypt);
	if (rc) {
		return rc;
	}

	return rc;
}
626
627int
628esp_gcm_encrypt_finalize(struct secasvar *sav,
0a7de745 629 unsigned char *tag, unsigned int tag_bytes)
3e170ce0
A
630{
631 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
0a7de745 632 return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
3e170ce0
A
633}
634
635int
636esp_gcm_decrypt_finalize(struct secasvar *sav,
0a7de745 637 unsigned char *tag, unsigned int tag_bytes)
3e170ce0
A
638{
639 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
0a7de745 640 return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
3e170ce0
A
641}
642
/*
 * Encrypt the ESP payload of mbuf chain m with AES-GCM (RFC 4106):
 * generate the next IV inside corecrypto, write it into the packet,
 * feed the ESP header as AAD, then encrypt the body into a freshly
 * allocated mbuf chain which replaces the original payload mbufs.
 * Unlike CBC, GCM is a stream mode, so arbitrary-length runs are
 * processed (no 16-byte staging buffer needed).
 *
 * m     = mbuf chain (freed on error)
 * off   = offset to the ESP header
 * plen  = unused
 * ivlen = must be ESP_GCM_IVLEN (8)
 *
 * Returns 0 on success, EINVAL/ENOBUFS/ENOMEM on failure (m consumed).
 */
int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;       /* offset from the head of chain, to head of this mbuf */
	int sn, dn;     /* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];	/* VLA: salt || IV */

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	/* generate new iv */
	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);

	if (aes_encrypt_reset_gcm(ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	/*
	 * The IV is now generated within corecrypto and
	 * is provided to ESP using aes_encrypt_inc_iv_gcm().
	 * This makes the sav->iv redundant and is no longer
	 * used in GCM operations. But we still copy the IV
	 * back to sav->iv to ensure that any future code reading
	 * this value will get the latest IV.
	 */
	memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
	m_copyback(m, ivoff, ivlen, sav->iv);
	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);	/* scrub salt+IV off the stack */

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	/* Set Additional Authentication Data */
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
		if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
			/* NOTE(review): message says "decryption" in the encrypt path — likely copy/paste; __FUNCTION__ disambiguates */
			ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = bodyoff - soff;
			break;
		}

		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;	/* last source mbuf whose (header) bytes are kept */
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source: whole remainder of this mbuf (GCM handles any length) */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination: allocate a new mbuf when the current one is full */
		if (!d || (dn + len > d->m_len)) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);	/* bytes still to encrypt */
			if (d && i > MLEN) {
				/* try a 2K cluster, then a 4K "big" cluster */
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = M_TRAILINGSPACE(d);

			if (d->m_len > i) {
				d->m_len = i;
			}

			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			/* bounce buffer is allocated once and reused across iterations */
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}

		/* NOTE(review): on this failure path d0 and sp_aligned are not freed — possible leak; compare the ENOBUFS paths above */
		if (aes_encrypt_gcm(sp, len, dptr + dn, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	// free memory
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	return 0;
}
871
872int
39037602
A
873esp_gcm_decrypt_aes(
874 struct mbuf *m,
875 size_t off,
876 struct secasvar *sav,
877 const struct esp_algorithm *algo __unused,
878 int ivlen)
3e170ce0
A
879{
880 struct mbuf *s;
881 struct mbuf *d, *d0, *dp;
0a7de745
A
882 int soff; /* offset from the head of chain, to head of this mbuf */
883 int sn, dn; /* offset from the head of the mbuf, to meat */
3e170ce0
A
884 size_t ivoff, bodyoff;
885 u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr;
886 u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
887 aes_gcm_ctx *ctx;
888 struct mbuf *scut;
889 int scutoff;
0a7de745
A
890 int i, len;
891 unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
3e170ce0
A
892
893 if (ivlen != ESP_GCM_IVLEN) {
0a7de745 894 ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
3e170ce0
A
895 m_freem(m);
896 return EINVAL;
897 }
898
899 if (sav->flags & SADB_X_EXT_OLD) {
900 /* RFC 1827 */
901 ivoff = off + sizeof(struct esp);
902 bodyoff = off + sizeof(struct esp) + ivlen;
903 } else {
904 ivoff = off + sizeof(struct newesp);
905 bodyoff = off + sizeof(struct newesp) + ivlen;
906 }
907
908 if (m->m_pkthdr.len < bodyoff) {
0a7de745 909 ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
3e170ce0
A
910 m->m_pkthdr.len, (u_int32_t)bodyoff));
911 m_freem(m);
912 return EINVAL;
913 }
914
915 /* grab iv */
916 m_copydata(m, ivoff, ivlen, (caddr_t) iv);
917
918 /* Set IV */
0a7de745
A
919 memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
920 memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);
3e170ce0
A
921
922 ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
923 if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
0a7de745 924 ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
3e170ce0
A
925 m_freem(m);
926 bzero(nonce, sizeof(nonce));
927 return EINVAL;
928 }
929 bzero(nonce, sizeof(nonce));
930
931 /* Set Additional Authentication Data */
932 if (!(sav->flags & SADB_X_EXT_OLD)) {
0a7de745 933 struct newesp esp;
3e170ce0
A
934 m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
935 if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
0a7de745 936 ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
3e170ce0
A
937 return EINVAL;
938 }
939 }
940
941 s = m;
942 soff = sn = dn = 0;
943 d = d0 = dp = NULL;
944 sp = dptr = NULL;
0a7de745 945
3e170ce0
A
946 /* skip header/IV offset */
947 while (soff < bodyoff) {
948 if (soff + s->m_len > bodyoff) {
949 sn = bodyoff - soff;
950 break;
951 }
952
953 soff += s->m_len;
954 s = s->m_next;
955 }
956 scut = s;
957 scutoff = sn;
958
959 /* skip over empty mbuf */
0a7de745 960 while (s && s->m_len == 0) {
3e170ce0 961 s = s->m_next;
0a7de745
A
962 }
963
3e170ce0
A
964 while (soff < m->m_pkthdr.len) {
965 /* source */
0a7de745 966 sp = mtod(s, u_int8_t *) + sn;
3e170ce0
A
967 len = s->m_len - sn;
968
969 /* destination */
970 if (!d || (dn + len > d->m_len)) {
0a7de745 971 if (d) {
3e170ce0 972 dp = d;
0a7de745 973 }
3e170ce0
A
974 MGET(d, M_DONTWAIT, MT_DATA);
975 i = m->m_pkthdr.len - (soff + sn);
976 if (d && i > MLEN) {
977 MCLGET(d, M_DONTWAIT);
978 if ((d->m_flags & M_EXT) == 0) {
979 d = m_mbigget(d, M_DONTWAIT);
980 if ((d->m_flags & M_EXT) == 0) {
981 m_free(d);
982 d = NULL;
983 }
984 }
985 }
986 if (!d) {
987 m_freem(m);
0a7de745 988 if (d0) {
3e170ce0 989 m_freem(d0);
0a7de745 990 }
3e170ce0
A
991 return ENOBUFS;
992 }
0a7de745 993 if (!d0) {
3e170ce0 994 d0 = d;
0a7de745
A
995 }
996 if (dp) {
3e170ce0 997 dp->m_next = d;
0a7de745 998 }
3e170ce0
A
999
1000 // try to make mbuf data aligned
1001 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
1002 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
1003 }
1004
1005 d->m_len = M_TRAILINGSPACE(d);
1006
0a7de745 1007 if (d->m_len > i) {
3e170ce0 1008 d->m_len = i;
0a7de745 1009 }
3e170ce0 1010
0a7de745 1011 dptr = mtod(d, u_int8_t *);
3e170ce0
A
1012 dn = 0;
1013 }
1014
1015 /* adjust len if greater than space available in dest */
0a7de745 1016 if (len > d->m_len - dn) {
3e170ce0 1017 len = d->m_len - dn;
0a7de745 1018 }
3e170ce0
A
1019
1020 /* Decrypt */
1021 // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
1022 if (IPSEC_IS_P2ALIGNED(sp)) {
1023 sp_unaligned = NULL;
1024 } else {
1025 sp_unaligned = sp;
1026 if (len > MAX_REALIGN_LEN) {
5ba3f43e
A
1027 m_freem(m);
1028 if (d0) {
1029 m_freem(d0);
1030 }
1031 if (sp_aligned != NULL) {
1032 FREE(sp_aligned, M_SECA);
1033 sp_aligned = NULL;
1034 }
3e170ce0
A
1035 return ENOBUFS;
1036 }
1037 if (sp_aligned == NULL) {
1038 sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
5ba3f43e
A
1039 if (sp_aligned == NULL) {
1040 m_freem(m);
1041 if (d0) {
1042 m_freem(d0);
1043 }
3e170ce0 1044 return ENOMEM;
5ba3f43e 1045 }
3e170ce0
A
1046 }
1047 sp = sp_aligned;
1048 memcpy(sp, sp_unaligned, len);
1049 }
1050 // no need to check output pointer alignment
1051
1052 if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
0a7de745 1053 ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
3e170ce0
A
1054 m_freem(m);
1055 return EINVAL;
1056 }
0a7de745 1057
3e170ce0
A
1058 // update unaligned pointers
1059 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
1060 sp = sp_unaligned;
1061 }
1062
1063 /* udpate offsets */
1064 sn += len;
1065 dn += len;
0a7de745 1066
3e170ce0
A
1067 /* find the next source block */
1068 while (s && sn >= s->m_len) {
1069 sn -= s->m_len;
1070 soff += s->m_len;
1071 s = s->m_next;
1072 }
1073 }
1074
1075 /* free un-needed source mbufs and add dest mbufs to chain */
1076 m_freem(scut->m_next);
1077 scut->m_len = scutoff;
1078 scut->m_next = d0;
1079
1080 // free memory
1081 if (sp_aligned != NULL) {
1082 FREE(sp_aligned, M_SECA);
1083 sp_aligned = NULL;
1084 }
0a7de745 1085
3e170ce0
A
1086 /* just in case */
1087 bzero(iv, sizeof(iv));
1088
1089 return 0;
1090}