]> git.saurik.com Git - apple/xnu.git/blame - bsd/netinet6/esp_rijndael.c
xnu-3789.31.2.tar.gz
[apple/xnu.git] / bsd / netinet6 / esp_rijndael.c
CommitLineData
b0d623f7 1/*
39037602 2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
b0d623f7
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
39037602 5 *
b0d623f7
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
39037602 14 *
b0d623f7
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
39037602 17 *
b0d623f7
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
39037602 25 *
b0d623f7
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
9bccf70c
A
29/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
30/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61#include <sys/param.h>
62#include <sys/systm.h>
63#include <sys/socket.h>
64#include <sys/queue.h>
91447636
A
65#include <sys/syslog.h>
66#include <sys/mbuf.h>
316670eb 67#include <sys/mcache.h>
91447636
A
68
69#include <kern/locks.h>
9bccf70c
A
70
71#include <net/if.h>
72#include <net/route.h>
73
74#include <netinet6/ipsec.h>
75#include <netinet6/esp.h>
76#include <netinet6/esp_rijndael.h>
77
316670eb 78#include <libkern/crypto/aes.h>
9bccf70c 79
2d21ac55
A
80#include <netkey/key.h>
81
9bccf70c
A
82#include <net/net_osdep.h>
83
15129b1c 84#define MAX_REALIGN_LEN 2000
91447636 85#define AES_BLOCKLEN 16
3e170ce0
A
86#define ESP_GCM_SALT_LEN 4 // RFC 4106 Section 4
87#define ESP_GCM_IVLEN 8
88#define ESP_GCM_ALIGN 16
91447636
A
89
90extern lck_mtx_t *sadb_mutex;
91
3e170ce0
A
/*
 * Per-SA AES-GCM state kept in sav->sched (after alignment to
 * ESP_GCM_ALIGN).  The two pointers index into the trailing ctxt[]
 * storage, which holds the corecrypto decrypt context followed by
 * the encrypt context (see esp_gcm_schedule).
 */
typedef struct {
	ccgcm_ctx *decrypt;	/* set to &ctxt[0] by esp_gcm_schedule */
	ccgcm_ctx *encrypt;	/* set just past the decrypt context */
	ccgcm_ctx ctxt[0];	/* variable-length storage for both contexts */
} aes_gcm_ctx;
97
9bccf70c 98int
2d21ac55
A
99esp_aes_schedlen(
100 __unused const struct esp_algorithm *algo)
9bccf70c
A
101{
102
91447636 103 return sizeof(aes_ctx);
9bccf70c
A
104}
105
/*
 * Expand the SA's encryption key (sav->key_enc) into the AES round-key
 * schedules stored in sav->sched.  Both directions are scheduled from
 * the same raw key.  Caller must hold sadb_mutex.
 *
 * Always returns 0; aes_*_key return values are not checked here.
 */
int
esp_aes_schedule(
	__unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{

	lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_ctx *ctx = (aes_ctx*)sav->sched;

	/* one expansion per direction, from the same SADB key material */
	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

	return 0;
}
120
91447636
A
121
/* The following 2 functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can, based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf.  The crypto code requires data to be a multiple
 * of 16 bytes.  A separate buffer is used when a 16-byte block spans
 * mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 *	soff = offset from beginning of the chain to the head of the
 *	    current mbuf.
 *	scut = last mbuf that contains headers to be retained
 *	scutoff = offset to end of the headers in scut
 *	s = the current mbuf
 *	sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 *	d0 = head of chain
 *	d = current mbuf
 *	dn = current offset in d (next location to store result)
 */
148
149
9bccf70c 150int
39037602
A
151esp_cbc_decrypt_aes(
152 struct mbuf *m,
153 size_t off,
154 struct secasvar *sav,
155 const struct esp_algorithm *algo,
156 int ivlen)
9bccf70c 157{
91447636
A
158 struct mbuf *s;
159 struct mbuf *d, *d0, *dp;
160 int soff; /* offset from the head of chain, to head of this mbuf */
161 int sn, dn; /* offset from the head of the mbuf, to meat */
162 size_t ivoff, bodyoff;
316670eb 163 u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
15129b1c 164 u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
91447636
A
165 struct mbuf *scut;
166 int scutoff;
167 int i, len;
168
169
170 if (ivlen != AES_BLOCKLEN) {
171 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
172 "unsupported ivlen %d\n", algo->name, ivlen));
173 m_freem(m);
174 return EINVAL;
175 }
176
177 if (sav->flags & SADB_X_EXT_OLD) {
178 /* RFC 1827 */
179 ivoff = off + sizeof(struct esp);
180 bodyoff = off + sizeof(struct esp) + ivlen;
181 } else {
182 ivoff = off + sizeof(struct newesp);
183 bodyoff = off + sizeof(struct newesp) + ivlen;
184 }
185
186 if (m->m_pkthdr.len < bodyoff) {
187 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
b0d623f7 188 algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
91447636
A
189 m_freem(m);
190 return EINVAL;
191 }
192 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
193 ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
194 "payload length must be multiple of %d\n",
195 algo->name, AES_BLOCKLEN));
196 m_freem(m);
197 return EINVAL;
198 }
199
200 /* grab iv */
b0d623f7 201 m_copydata(m, ivoff, ivlen, (caddr_t) iv);
91447636 202
91447636
A
203 s = m;
204 soff = sn = dn = 0;
205 d = d0 = dp = NULL;
206 sp = dptr = NULL;
207
208 /* skip header/IV offset */
209 while (soff < bodyoff) {
210 if (soff + s->m_len > bodyoff) {
211 sn = bodyoff - soff;
212 break;
213 }
214
215 soff += s->m_len;
216 s = s->m_next;
217 }
218 scut = s;
219 scutoff = sn;
220
221 /* skip over empty mbuf */
222 while (s && s->m_len == 0)
223 s = s->m_next;
224
225 while (soff < m->m_pkthdr.len) {
226 /* source */
227 if (sn + AES_BLOCKLEN <= s->m_len) {
228 /* body is continuous */
229 sp = mtod(s, u_int8_t *) + sn;
230 len = s->m_len - sn;
231 len -= len % AES_BLOCKLEN; // full blocks only
232 } else {
233 /* body is non-continuous */
b0d623f7 234 m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
91447636
A
235 sp = sbuf;
236 len = AES_BLOCKLEN; // 1 block only in sbuf
237 }
238
239 /* destination */
240 if (!d || dn + AES_BLOCKLEN > d->m_len) {
241 if (d)
242 dp = d;
243 MGET(d, M_DONTWAIT, MT_DATA);
244 i = m->m_pkthdr.len - (soff + sn);
245 if (d && i > MLEN) {
246 MCLGET(d, M_DONTWAIT);
247 if ((d->m_flags & M_EXT) == 0) {
2d21ac55
A
248 d = m_mbigget(d, M_DONTWAIT);
249 if ((d->m_flags & M_EXT) == 0) {
250 m_free(d);
251 d = NULL;
252 }
91447636
A
253 }
254 }
255 if (!d) {
256 m_freem(m);
257 if (d0)
258 m_freem(d0);
91447636
A
259 return ENOBUFS;
260 }
261 if (!d0)
262 d0 = d;
263 if (dp)
264 dp->m_next = d;
316670eb
A
265
266 // try to make mbuf data aligned
267 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
268 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
269 }
270
91447636
A
271 d->m_len = M_TRAILINGSPACE(d);
272 d->m_len -= d->m_len % AES_BLOCKLEN;
273 if (d->m_len > i)
274 d->m_len = i;
275 dptr = mtod(d, u_int8_t *);
276 dn = 0;
277 }
278
279 /* adjust len if greater than space available in dest */
280 if (len > d->m_len - dn)
281 len = d->m_len - dn;
282
283 /* decrypt */
316670eb
A
284 // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
285 if (IPSEC_IS_P2ALIGNED(sp)) {
286 sp_unaligned = NULL;
287 } else {
288 sp_unaligned = sp;
15129b1c
A
289 if (len > MAX_REALIGN_LEN) {
290 return ENOBUFS;
291 }
292 if (sp_aligned == NULL) {
293 sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
294 if (sp_aligned == NULL)
295 return ENOMEM;
296 }
297 sp = sp_aligned;
316670eb
A
298 memcpy(sp, sp_unaligned, len);
299 }
300 // no need to check output pointer alignment
91447636
A
301 aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
302 (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));
303
316670eb
A
304 // update unaligned pointers
305 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
306 sp = sp_unaligned;
307 }
308
91447636
A
309 /* udpate offsets */
310 sn += len;
311 dn += len;
312
313 // next iv
314 bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
315
316 /* find the next source block */
317 while (s && sn >= s->m_len) {
318 sn -= s->m_len;
319 soff += s->m_len;
320 s = s->m_next;
321 }
322
323 }
324
325 /* free un-needed source mbufs and add dest mbufs to chain */
326 m_freem(scut->m_next);
327 scut->m_len = scutoff;
328 scut->m_next = d0;
329
15129b1c
A
330 // free memory
331 if (sp_aligned != NULL) {
332 FREE(sp_aligned, M_SECA);
333 sp_aligned = NULL;
334 }
335
91447636
A
336 /* just in case */
337 bzero(iv, sizeof(iv));
338 bzero(sbuf, sizeof(sbuf));
91447636 339
9bccf70c
A
340 return 0;
341}
342
343int
2d21ac55
A
344esp_cbc_encrypt_aes(
345 struct mbuf *m,
346 size_t off,
347 __unused size_t plen,
348 struct secasvar *sav,
349 const struct esp_algorithm *algo,
350 int ivlen)
9bccf70c 351{
91447636
A
352 struct mbuf *s;
353 struct mbuf *d, *d0, *dp;
2d21ac55 354 int soff; /* offset from the head of chain, to head of this mbuf */
91447636
A
355 int sn, dn; /* offset from the head of the mbuf, to meat */
356 size_t ivoff, bodyoff;
316670eb 357 u_int8_t *ivp, *dptr, *ivp_unaligned;
15129b1c 358 u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
316670eb 359 u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
91447636
A
360 struct mbuf *scut;
361 int scutoff;
362 int i, len;
363
364 if (ivlen != AES_BLOCKLEN) {
365 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
366 "unsupported ivlen %d\n", algo->name, ivlen));
367 m_freem(m);
368 return EINVAL;
369 }
370
371 if (sav->flags & SADB_X_EXT_OLD) {
372 /* RFC 1827 */
373 ivoff = off + sizeof(struct esp);
374 bodyoff = off + sizeof(struct esp) + ivlen;
375 } else {
376 ivoff = off + sizeof(struct newesp);
377 bodyoff = off + sizeof(struct newesp) + ivlen;
378 }
379
380 /* put iv into the packet */
381 m_copyback(m, ivoff, ivlen, sav->iv);
b0d623f7 382 ivp = (u_int8_t *) sav->iv;
91447636
A
383
384 if (m->m_pkthdr.len < bodyoff) {
385 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
b0d623f7 386 algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
91447636
A
387 m_freem(m);
388 return EINVAL;
389 }
390 if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
391 ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
392 "payload length must be multiple of %lu\n",
393 algo->name, AES_BLOCKLEN));
394 m_freem(m);
395 return EINVAL;
396 }
91447636
A
397
398 s = m;
399 soff = sn = dn = 0;
400 d = d0 = dp = NULL;
401 sp = dptr = NULL;
402
403 /* skip headers/IV */
404 while (soff < bodyoff) {
405 if (soff + s->m_len > bodyoff) {
406 sn = bodyoff - soff;
407 break;
408 }
409
410 soff += s->m_len;
411 s = s->m_next;
412 }
413 scut = s;
414 scutoff = sn;
415
416 /* skip over empty mbuf */
417 while (s && s->m_len == 0)
418 s = s->m_next;
419
420 while (soff < m->m_pkthdr.len) {
421 /* source */
422 if (sn + AES_BLOCKLEN <= s->m_len) {
423 /* body is continuous */
424 sp = mtod(s, u_int8_t *) + sn;
425 len = s->m_len - sn;
426 len -= len % AES_BLOCKLEN; // full blocks only
427 } else {
428 /* body is non-continuous */
b0d623f7 429 m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
91447636
A
430 sp = sbuf;
431 len = AES_BLOCKLEN; // 1 block only in sbuf
432 }
433
434 /* destination */
435 if (!d || dn + AES_BLOCKLEN > d->m_len) {
436 if (d)
437 dp = d;
438 MGET(d, M_DONTWAIT, MT_DATA);
439 i = m->m_pkthdr.len - (soff + sn);
440 if (d && i > MLEN) {
441 MCLGET(d, M_DONTWAIT);
442 if ((d->m_flags & M_EXT) == 0) {
2d21ac55
A
443 d = m_mbigget(d, M_DONTWAIT);
444 if ((d->m_flags & M_EXT) == 0) {
445 m_free(d);
446 d = NULL;
447 }
91447636
A
448 }
449 }
450 if (!d) {
451 m_freem(m);
452 if (d0)
453 m_freem(d0);
91447636
A
454 return ENOBUFS;
455 }
456 if (!d0)
457 d0 = d;
458 if (dp)
459 dp->m_next = d;
460
316670eb
A
461 // try to make mbuf data aligned
462 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
463 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
464 }
465
91447636
A
466 d->m_len = M_TRAILINGSPACE(d);
467 d->m_len -= d->m_len % AES_BLOCKLEN;
468 if (d->m_len > i)
469 d->m_len = i;
470 dptr = mtod(d, u_int8_t *);
471 dn = 0;
472 }
473
474 /* adjust len if greater than space available */
475 if (len > d->m_len - dn)
476 len = d->m_len - dn;
477
478 /* encrypt */
316670eb
A
479 // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
480 if (IPSEC_IS_P2ALIGNED(sp)) {
481 sp_unaligned = NULL;
482 } else {
483 sp_unaligned = sp;
15129b1c
A
484 if (len > MAX_REALIGN_LEN) {
485 return ENOBUFS;
486 }
487 if (sp_aligned == NULL) {
488 sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
489 if (sp_aligned == NULL)
490 return ENOMEM;
491 }
492 sp = sp_aligned;
316670eb
A
493 memcpy(sp, sp_unaligned, len);
494 }
495 // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
496 if (IPSEC_IS_P2ALIGNED(ivp)) {
497 ivp_unaligned = NULL;
498 } else {
499 ivp_unaligned = ivp;
500 ivp = ivp_aligned_buf;
15129b1c 501 memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
316670eb
A
502 }
503 // no need to check output pointer alignment
91447636
A
504 aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
505 (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
506
316670eb
A
507 // update unaligned pointers
508 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
509 sp = sp_unaligned;
510 }
511 if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
512 ivp = ivp_unaligned;
513 }
514
91447636
A
515 /* update offsets */
516 sn += len;
517 dn += len;
518
519 /* next iv */
520 ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted
521
522 /* find the next source block and skip empty mbufs */
523 while (s && sn >= s->m_len) {
524 sn -= s->m_len;
525 soff += s->m_len;
526 s = s->m_next;
527 }
91447636
A
528 }
529
530 /* free un-needed source mbufs and add dest mbufs to chain */
531 m_freem(scut->m_next);
532 scut->m_len = scutoff;
533 scut->m_next = d0;
15129b1c
A
534
535 // free memory
536 if (sp_aligned != NULL) {
537 FREE(sp_aligned, M_SECA);
538 sp_aligned = NULL;
539 }
91447636
A
540
541 /* just in case */
542 bzero(sbuf, sizeof(sbuf));
91447636
A
543 key_sa_stir_iv(sav);
544
9bccf70c
A
545 return 0;
546}
3e170ce0
A
547
548int
549esp_gcm_schedlen(
550 __unused const struct esp_algorithm *algo)
551{
552 return (sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN);
553}
554
555int
556esp_gcm_schedule( __unused const struct esp_algorithm *algo,
557 struct secasvar *sav)
558{
559 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
560 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
d190cdc3
A
561 u_int ivlen = sav->ivlen;
562 unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];
3e170ce0
A
563 int rc;
564
565 ctx->decrypt = &ctx->ctxt[0];
566 ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];
567
568 rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->decrypt);
569 if (rc) {
570 return (rc);
571 }
572
d190cdc3
A
573 bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
574 memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
575 memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen);
576
577 rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
3e170ce0
A
578 if (rc) {
579 return (rc);
580 }
d190cdc3
A
581
582 rc = aes_encrypt_reset_gcm(ctx->encrypt);
583 if (rc) {
584 return (rc);
585 }
586
3e170ce0
A
587 return (rc);
588}
589
590int
591esp_gcm_encrypt_finalize(struct secasvar *sav,
592 unsigned char *tag, unsigned int tag_bytes)
593{
594 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
595 return (aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt));
596}
597
598int
599esp_gcm_decrypt_finalize(struct secasvar *sav,
600 unsigned char *tag, unsigned int tag_bytes)
601{
602 aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
603 return (aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt));
604}
605
606int
607esp_gcm_encrypt_aes(
608 struct mbuf *m,
609 size_t off,
610 __unused size_t plen,
611 struct secasvar *sav,
612 const struct esp_algorithm *algo __unused,
613 int ivlen)
614{
615 struct mbuf *s;
616 struct mbuf *d, *d0, *dp;
617 int soff; /* offset from the head of chain, to head of this mbuf */
618 int sn, dn; /* offset from the head of the mbuf, to meat */
619 size_t ivoff, bodyoff;
620 u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
621 aes_gcm_ctx *ctx;
622 struct mbuf *scut;
623 int scutoff;
624 int i, len;
625 unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];
d190cdc3 626
3e170ce0
A
627 if (ivlen != ESP_GCM_IVLEN) {
628 ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
629 m_freem(m);
630 return EINVAL;
631 }
632
633 if (sav->flags & SADB_X_EXT_OLD) {
634 /* RFC 1827 */
635 ivoff = off + sizeof(struct esp);
636 bodyoff = off + sizeof(struct esp) + ivlen;
637 } else {
638 ivoff = off + sizeof(struct newesp);
639 bodyoff = off + sizeof(struct newesp) + ivlen;
640 }
641
d190cdc3
A
642 bzero(nonce, ESP_GCM_SALT_LEN+ivlen);
643 /* generate new iv */
644 ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
3e170ce0 645
d190cdc3
A
646 if (aes_encrypt_reset_gcm(ctx->encrypt)) {
647 ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
3e170ce0
A
648 m_freem(m);
649 return EINVAL;
650 }
651
d190cdc3
A
652 if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
653 ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
654 m_freem(m);
655 return EINVAL;
656 }
3e170ce0 657
d190cdc3
A
658 /*
659 * The IV is now generated within corecrypto and
660 * is provided to ESP using aes_encrypt_inc_iv_gcm().
661 * This makes the sav->iv redundant and is no longer
662 * used in GCM operations. But we still copy the IV
663 * back to sav->iv to ensure that any future code reading
664 * this value will get the latest IV.
665 */
666 memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
667 m_copyback(m, ivoff, ivlen, sav->iv);
668 bzero(nonce, ESP_GCM_SALT_LEN+ivlen);
669
670 if (m->m_pkthdr.len < bodyoff) {
671 ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
672 m->m_pkthdr.len, (u_int32_t)bodyoff));
3e170ce0 673 m_freem(m);
3e170ce0
A
674 return EINVAL;
675 }
3e170ce0
A
676
677 /* Set Additional Authentication Data */
678 if (!(sav->flags & SADB_X_EXT_OLD)) {
679 struct newesp esp;
680 m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
681 if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
682 ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
683 m_freem(m);
684 return EINVAL;
685 }
686 }
687
688 s = m;
689 soff = sn = dn = 0;
690 d = d0 = dp = NULL;
691 sp = dptr = NULL;
692
693 /* skip headers/IV */
694 while (soff < bodyoff) {
695 if (soff + s->m_len > bodyoff) {
696 sn = bodyoff - soff;
697 break;
698 }
699
700 soff += s->m_len;
701 s = s->m_next;
702 }
703 scut = s;
704 scutoff = sn;
705
706 /* skip over empty mbuf */
707 while (s && s->m_len == 0)
708 s = s->m_next;
709
710 while (soff < m->m_pkthdr.len) {
711 /* source */
712 sp = mtod(s, u_int8_t *) + sn;
713 len = s->m_len - sn;
714
715 /* destination */
716 if (!d || (dn + len > d->m_len)) {
717 if (d)
718 dp = d;
719 MGET(d, M_DONTWAIT, MT_DATA);
720 i = m->m_pkthdr.len - (soff + sn);
721 if (d && i > MLEN) {
722 MCLGET(d, M_DONTWAIT);
723 if ((d->m_flags & M_EXT) == 0) {
724 d = m_mbigget(d, M_DONTWAIT);
725 if ((d->m_flags & M_EXT) == 0) {
726 m_free(d);
727 d = NULL;
728 }
729 }
730 }
731 if (!d) {
732 m_freem(m);
733 if (d0)
734 m_freem(d0);
735 return ENOBUFS;
736 }
737 if (!d0)
738 d0 = d;
739 if (dp)
740 dp->m_next = d;
741
742 // try to make mbuf data aligned
743 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
744 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
745 }
746
747 d->m_len = M_TRAILINGSPACE(d);
748
749 if (d->m_len > i)
750 d->m_len = i;
751
752 dptr = mtod(d, u_int8_t *);
753 dn = 0;
754 }
755
756 /* adjust len if greater than space available */
757 if (len > d->m_len - dn)
758 len = d->m_len - dn;
759
760 /* encrypt */
761 // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
762 if (IPSEC_IS_P2ALIGNED(sp)) {
763 sp_unaligned = NULL;
764 } else {
765 sp_unaligned = sp;
766 if (len > MAX_REALIGN_LEN) {
767 return ENOBUFS;
768 }
769 if (sp_aligned == NULL) {
770 sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
771 if (sp_aligned == NULL)
772 return ENOMEM;
773 }
774 sp = sp_aligned;
775 memcpy(sp, sp_unaligned, len);
776 }
777
778 if (aes_encrypt_gcm(sp, len, dptr+dn, ctx->encrypt)) {
779 ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
780 m_freem(m);
781 return EINVAL;
782 }
783
784 // update unaligned pointers
785 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
786 sp = sp_unaligned;
787 }
788
789 /* update offsets */
790 sn += len;
791 dn += len;
792
793 /* find the next source block and skip empty mbufs */
794 while (s && sn >= s->m_len) {
795 sn -= s->m_len;
796 soff += s->m_len;
797 s = s->m_next;
798 }
799 }
800
801 /* free un-needed source mbufs and add dest mbufs to chain */
802 m_freem(scut->m_next);
803 scut->m_len = scutoff;
804 scut->m_next = d0;
805
806 // free memory
807 if (sp_aligned != NULL) {
808 FREE(sp_aligned, M_SECA);
809 sp_aligned = NULL;
810 }
811
3e170ce0
A
812 return 0;
813}
814
815int
39037602
A
816esp_gcm_decrypt_aes(
817 struct mbuf *m,
818 size_t off,
819 struct secasvar *sav,
820 const struct esp_algorithm *algo __unused,
821 int ivlen)
3e170ce0
A
822{
823 struct mbuf *s;
824 struct mbuf *d, *d0, *dp;
825 int soff; /* offset from the head of chain, to head of this mbuf */
826 int sn, dn; /* offset from the head of the mbuf, to meat */
827 size_t ivoff, bodyoff;
828 u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr;
829 u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
830 aes_gcm_ctx *ctx;
831 struct mbuf *scut;
832 int scutoff;
833 int i, len;
834 unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];
835
836 if (ivlen != ESP_GCM_IVLEN) {
837 ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
838 m_freem(m);
839 return EINVAL;
840 }
841
842 if (sav->flags & SADB_X_EXT_OLD) {
843 /* RFC 1827 */
844 ivoff = off + sizeof(struct esp);
845 bodyoff = off + sizeof(struct esp) + ivlen;
846 } else {
847 ivoff = off + sizeof(struct newesp);
848 bodyoff = off + sizeof(struct newesp) + ivlen;
849 }
850
851 if (m->m_pkthdr.len < bodyoff) {
852 ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
853 m->m_pkthdr.len, (u_int32_t)bodyoff));
854 m_freem(m);
855 return EINVAL;
856 }
857
858 /* grab iv */
859 m_copydata(m, ivoff, ivlen, (caddr_t) iv);
860
861 /* Set IV */
862 memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
863 memcpy(nonce+ESP_GCM_SALT_LEN, iv, ivlen);
864
865 ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
866 if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
867 ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
868 m_freem(m);
869 bzero(nonce, sizeof(nonce));
870 return EINVAL;
871 }
872 bzero(nonce, sizeof(nonce));
873
874 /* Set Additional Authentication Data */
875 if (!(sav->flags & SADB_X_EXT_OLD)) {
876 struct newesp esp;
877 m_copydata(m, off, sizeof(esp), (caddr_t) &esp);
878 if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
879 ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
880 return EINVAL;
881 }
882 }
883
884 s = m;
885 soff = sn = dn = 0;
886 d = d0 = dp = NULL;
887 sp = dptr = NULL;
888
889 /* skip header/IV offset */
890 while (soff < bodyoff) {
891 if (soff + s->m_len > bodyoff) {
892 sn = bodyoff - soff;
893 break;
894 }
895
896 soff += s->m_len;
897 s = s->m_next;
898 }
899 scut = s;
900 scutoff = sn;
901
902 /* skip over empty mbuf */
903 while (s && s->m_len == 0)
904 s = s->m_next;
905
906 while (soff < m->m_pkthdr.len) {
907 /* source */
908 sp = mtod(s, u_int8_t *) + sn;
909 len = s->m_len - sn;
910
911 /* destination */
912 if (!d || (dn + len > d->m_len)) {
913 if (d)
914 dp = d;
915 MGET(d, M_DONTWAIT, MT_DATA);
916 i = m->m_pkthdr.len - (soff + sn);
917 if (d && i > MLEN) {
918 MCLGET(d, M_DONTWAIT);
919 if ((d->m_flags & M_EXT) == 0) {
920 d = m_mbigget(d, M_DONTWAIT);
921 if ((d->m_flags & M_EXT) == 0) {
922 m_free(d);
923 d = NULL;
924 }
925 }
926 }
927 if (!d) {
928 m_freem(m);
929 if (d0)
930 m_freem(d0);
931 return ENOBUFS;
932 }
933 if (!d0)
934 d0 = d;
935 if (dp)
936 dp->m_next = d;
937
938 // try to make mbuf data aligned
939 if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
940 m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
941 }
942
943 d->m_len = M_TRAILINGSPACE(d);
944
945 if (d->m_len > i)
946 d->m_len = i;
947
948 dptr = mtod(d, u_int8_t *);
949 dn = 0;
950 }
951
952 /* adjust len if greater than space available in dest */
953 if (len > d->m_len - dn)
954 len = d->m_len - dn;
955
956 /* Decrypt */
957 // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
958 if (IPSEC_IS_P2ALIGNED(sp)) {
959 sp_unaligned = NULL;
960 } else {
961 sp_unaligned = sp;
962 if (len > MAX_REALIGN_LEN) {
963 return ENOBUFS;
964 }
965 if (sp_aligned == NULL) {
966 sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
967 if (sp_aligned == NULL)
968 return ENOMEM;
969 }
970 sp = sp_aligned;
971 memcpy(sp, sp_unaligned, len);
972 }
973 // no need to check output pointer alignment
974
975 if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
976 ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
977 m_freem(m);
978 return EINVAL;
979 }
980
981 // update unaligned pointers
982 if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
983 sp = sp_unaligned;
984 }
985
986 /* udpate offsets */
987 sn += len;
988 dn += len;
989
990 /* find the next source block */
991 while (s && sn >= s->m_len) {
992 sn -= s->m_len;
993 soff += s->m_len;
994 s = s->m_next;
995 }
996 }
997
998 /* free un-needed source mbufs and add dest mbufs to chain */
999 m_freem(scut->m_next);
1000 scut->m_len = scutoff;
1001 scut->m_next = d0;
1002
1003 // free memory
1004 if (sp_aligned != NULL) {
1005 FREE(sp_aligned, M_SECA);
1006 sp_aligned = NULL;
1007 }
1008
1009 /* just in case */
1010 bzero(iv, sizeof(iv));
1011
1012 return 0;
1013}