X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/9bccf70c0258c7cac2dcb80011b2a964d884c552..7ee9d059c4eecf68ae4f8b0fb99ae2471eda79af:/bsd/netinet6/esp_rijndael.c

diff --git a/bsd/netinet6/esp_rijndael.c b/bsd/netinet6/esp_rijndael.c
index fa35c593c..0f3ce7c27 100644
--- a/bsd/netinet6/esp_rijndael.c
+++ b/bsd/netinet6/esp_rijndael.c
@@ -1,3 +1,31 @@
+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
 /*	$FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $	*/
 /*	$KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $	*/
 
@@ -34,6 +62,10 @@
 #include <sys/systm.h>
 #include <sys/socket.h>
 #include <sys/queue.h>
+#include <sys/syslog.h>
+#include <sys/mbuf.h>
+
+#include <kern/locks.h>
 
 #include <netinet/in.h>
 #include <netinet/ip6.h>
@@ -42,72 +74,384 @@
 #include <netinet6/esp.h>
 #include <netinet6/esp_rijndael.h>
 
-#include <crypto/rijndael/rijndael-api-fst.h>
+#include <libkern/crypto/aes.h>
+
+#include <netkey/key.h>
 
 #include <net/net_osdep.h>
 
-/* as rijndael uses assymetric scheduled keys, we need to do it twice. */
+#define AES_BLOCKLEN 16
+
+extern lck_mtx_t *sadb_mutex;
+
 int
-esp_rijndael_schedlen(algo)
-	const struct esp_algorithm *algo;
+esp_aes_schedlen(
+	__unused const struct esp_algorithm *algo)
 {
-	return sizeof(keyInstance) * 2;
+	return sizeof(aes_ctx);
 }
 
 int
-esp_rijndael_schedule(algo, sav)
-	const struct esp_algorithm *algo;
-	struct secasvar *sav;
+esp_aes_schedule(
+	__unused const struct esp_algorithm *algo,
+	struct secasvar *sav)
 {
-	keyInstance *k;
-
-	k = (keyInstance *)sav->sched;
-	if (rijndael_makeKey(&k[0], DIR_DECRYPT, _KEYLEN(sav->key_enc) * 8,
-	    _KEYBUF(sav->key_enc)) < 0)
-		return -1;
-	if (rijndael_makeKey(&k[1], DIR_ENCRYPT, _KEYLEN(sav->key_enc) * 8,
-	    _KEYBUF(sav->key_enc)) < 0)
-		return -1;
+
+	lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
+	aes_ctx *ctx = (aes_ctx*)sav->sched;
+
+	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
+	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
+
 	return 0;
 }
+
+/* The following 2 functions decrypt or encrypt the contents of
+ * the mbuf chain passed in, keeping the IP and ESP headers in place,
+ * along with the IV.
+ * The code attempts to call the crypto code with the largest chunk
+ * of data it can based on the amount of source data in
+ * the current source mbuf and the space remaining in the current
+ * destination mbuf. The crypto code requires the data length to be
+ * a multiple of 16 bytes. A separate buffer is used when a 16 byte
+ * block spans mbufs.
+ *
+ * m = mbuf chain
+ * off = offset to ESP header
+ *
+ * local vars for source:
+ * soff = offset from beginning of the chain to the head of the
+ *	current mbuf.
+ * scut = last mbuf that contains headers to be retained
+ * scutoff = offset to end of the headers in scut
+ * s = the current mbuf
+ * sn = current offset to data in s (next source data to process)
+ *
+ * local vars for dest:
+ * d0 = head of chain
+ * d = current mbuf
+ * dn = current offset in d (next location to store result)
+ */
+
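To illustrate the chunking strategy the comment describes, here is a minimal userspace sketch; the seg struct and process callback are hypothetical stand-ins for mbufs and the cipher call, not kernel APIs. It walks the chain in 16-byte blocks and falls back to a bounce buffer when a block straddles segments:

#include <stddef.h>
#include <string.h>

#define BLOCKLEN 16

/* Hypothetical stand-in for an mbuf: one byte run plus a link. */
struct seg {
	unsigned char *data;
	size_t len;
	struct seg *next;
};

/* Walk the chain one 16-byte block at a time; 'process' stands in for
 * the cipher call. A trailing partial block is ignored, mirroring the
 * multiple-of-16 requirement above. */
static void
walk_blocks(struct seg *s, void (*process)(const unsigned char *block))
{
	unsigned char bounce[BLOCKLEN];
	size_t sn = 0;		/* next unread byte within s */

	while (s != NULL) {
		if (sn + BLOCKLEN <= s->len) {
			/* block is contiguous within this segment */
			process(s->data + sn);
			sn += BLOCKLEN;
		} else {
			/* block straddles segments: gather into bounce buffer */
			size_t got = 0;
			while (got < BLOCKLEN && s != NULL) {
				size_t take = s->len - sn;
				if (take > BLOCKLEN - got)
					take = BLOCKLEN - got;
				memcpy(bounce + got, s->data + sn, take);
				got += take;
				sn += take;
				if (sn == s->len) {
					s = s->next;
					sn = 0;
				}
			}
			if (got == BLOCKLEN)
				process(bounce);
		}
		/* advance past exhausted or empty segments */
		while (s != NULL && sn == s->len) {
			s = s->next;
			sn = 0;
		}
	}
}

The kernel code below follows the same shape, except that it also rounds each contiguous run down to a whole number of blocks so one aes_decrypt_cbc/aes_encrypt_cbc call can cover many blocks at once.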
+
 int
-esp_rijndael_blockdecrypt(algo, sav, s, d)
-	const struct esp_algorithm *algo;
+esp_cbc_decrypt_aes(m, off, sav, algo, ivlen)
+	struct mbuf *m;
+	size_t off;
 	struct secasvar *sav;
-	u_int8_t *s;
-	u_int8_t *d;
+	const struct esp_algorithm *algo;
+	int ivlen;
 {
-	cipherInstance c;
-	keyInstance *p;
-
-	/* does not take advantage of CBC mode support */
-	bzero(&c, sizeof(c));
-	if (rijndael_cipherInit(&c, MODE_ECB, NULL) < 0)
-		return -1;
-	p = (keyInstance *)sav->sched;
-	if (rijndael_blockDecrypt(&c, &p[0], s, algo->padbound * 8, d) < 0)
-		return -1;
+	struct mbuf *s;
+	struct mbuf *d, *d0, *dp;
+	int soff;	/* offset from the head of chain, to head of this mbuf */
+	int sn, dn;	/* offset from the head of the mbuf, to meat */
+	size_t ivoff, bodyoff;
+	u_int8_t iv[AES_BLOCKLEN], *dptr;
+	u_int8_t sbuf[AES_BLOCKLEN], *sp;
+	struct mbuf *scut;
+	int scutoff;
+	int i, len;
+
+
+	if (ivlen != AES_BLOCKLEN) {
+		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
+		    "unsupported ivlen %d\n", algo->name, ivlen));
+		m_freem(m);
+		return EINVAL;
+	}
+
+	if (sav->flags & SADB_X_EXT_OLD) {
+		/* RFC 1827 */
+		ivoff = off + sizeof(struct esp);
+		bodyoff = off + sizeof(struct esp) + ivlen;
+	} else {
+		ivoff = off + sizeof(struct newesp);
+		bodyoff = off + sizeof(struct newesp) + ivlen;
+	}
+
+	if (m->m_pkthdr.len < bodyoff) {
+		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
+		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
+		m_freem(m);
+		return EINVAL;
+	}
+	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
+		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
+		    "payload length must be multiple of %d\n",
+		    algo->name, AES_BLOCKLEN));
+		m_freem(m);
+		return EINVAL;
+	}
+
+	/* grab iv */
+	m_copydata(m, ivoff, ivlen, (caddr_t) iv);
+
+	s = m;
+	soff = sn = dn = 0;
+	d = d0 = dp = NULL;
+	sp = dptr = NULL;
+
+	/* skip header/IV offset */
+	while (soff < bodyoff) {
+		if (soff + s->m_len > bodyoff) {
+			sn = bodyoff - soff;
+			break;
+		}
+
+		soff += s->m_len;
+		s = s->m_next;
+	}
+	scut = s;
+	scutoff = sn;
+
+	/* skip over empty mbuf */
+	while (s && s->m_len == 0)
+		s = s->m_next;
+
+	while (soff < m->m_pkthdr.len) {
+		/* source */
+		if (sn + AES_BLOCKLEN <= s->m_len) {
+			/* body is continuous */
+			sp = mtod(s, u_int8_t *) + sn;
+			len = s->m_len - sn;
+			len -= len % AES_BLOCKLEN;	// full blocks only
+		} else {
+			/* body is non-continuous */
+			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
+			sp = sbuf;
+			len = AES_BLOCKLEN;		// 1 block only in sbuf
+		}
+
+		/* destination */
+		if (!d || dn + AES_BLOCKLEN > d->m_len) {
+			if (d)
+				dp = d;
+			MGET(d, M_DONTWAIT, MT_DATA);
+			i = m->m_pkthdr.len - (soff + sn);
+			if (d && i > MLEN) {
+				MCLGET(d, M_DONTWAIT);
+				if ((d->m_flags & M_EXT) == 0) {
+					d = m_mbigget(d, M_DONTWAIT);
+					if ((d->m_flags & M_EXT) == 0) {
+						m_free(d);
+						d = NULL;
+					}
+				}
+			}
+			if (!d) {
+				m_freem(m);
+				if (d0)
+					m_freem(d0);
+				return ENOBUFS;
+			}
+			if (!d0)
+				d0 = d;
+			if (dp)
+				dp->m_next = d;
+			d->m_len = M_TRAILINGSPACE(d);
+			d->m_len -= d->m_len % AES_BLOCKLEN;
+			if (d->m_len > i)
+				d->m_len = i;
+			dptr = mtod(d, u_int8_t *);
+			dn = 0;
+		}
+
+		/* adjust len if greater than space available in dest */
+		if (len > d->m_len - dn)
+			len = d->m_len - dn;
+
+		/* decrypt */
+		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
+		    (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));
+
+		/* update offsets */
+		sn += len;
+		dn += len;
+
+		// next iv
+		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
+
+		/* find the next source block */
+		while (s && sn >= s->m_len) {
+			sn -= s->m_len;
+			soff += s->m_len;
+			s = s->m_next;
+		}
+
+	}
+
+	/* free unneeded source mbufs and add dest mbufs to chain */
+	m_freem(scut->m_next);
+	scut->m_len = scutoff;
+	scut->m_next = d0;
+
+	/* just in case */
+	bzero(iv, sizeof(iv));
+	bzero(sbuf, sizeof(sbuf));
+
 	return 0;
 }
 
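The bcopy of the last ciphertext block into iv just before advancing is the CBC chaining rule: each plaintext block is the block decryption of its ciphertext XORed with the previous ciphertext block (the IV for the first). A self-contained sketch of that recurrence, with a toy XOR transform standing in for the real AES block primitive:

#include <string.h>

#define BLOCKLEN 16

/* Toy stand-in for the raw AES block decryption; NOT a real cipher,
 * just enough to make the chaining visible. */
static void
block_decrypt(const unsigned char *in, unsigned char *out)
{
	int j;

	for (j = 0; j < BLOCKLEN; j++)
		out[j] = in[j] ^ 0xAA;	/* placeholder transform */
}

/* CBC decrypt: P[i] = D(C[i]) ^ C[i-1], with C[-1] = IV.
 * C[i] is saved before decrypting so the routine also works in place,
 * and 'iv' exits holding the last ciphertext block, which is exactly
 * what the bcopy() above carries forward to the next chunk. */
static void
cbc_decrypt(const unsigned char *src, unsigned char *dst, size_t nblocks,
    unsigned char iv[BLOCKLEN])
{
	unsigned char tmp[BLOCKLEN];
	size_t i;
	int j;

	for (i = 0; i < nblocks; i++) {
		memcpy(tmp, src, BLOCKLEN);	/* save C[i] for chaining */
		block_decrypt(src, dst);	/* dst = D(C[i]) */
		for (j = 0; j < BLOCKLEN; j++)
			dst[j] ^= iv[j];	/* dst = D(C[i]) ^ C[i-1] */
		memcpy(iv, tmp, BLOCKLEN);	/* C[i] becomes the next IV */
		src += BLOCKLEN;
		dst += BLOCKLEN;
	}
}

The kernel code can take the next IV from sp because source and destination are separate chains, so the ciphertext is still intact after the aes_decrypt_cbc call.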
 int
-esp_rijndael_blockencrypt(algo, sav, s, d)
-	const struct esp_algorithm *algo;
-	struct secasvar *sav;
-	u_int8_t *s;
-	u_int8_t *d;
+esp_cbc_encrypt_aes(
+	struct mbuf *m,
+	size_t off,
+	__unused size_t plen,
+	struct secasvar *sav,
+	const struct esp_algorithm *algo,
+	int ivlen)
 {
-	cipherInstance c;
-	keyInstance *p;
-
-	/* does not take advantage of CBC mode support */
-	bzero(&c, sizeof(c));
-	if (rijndael_cipherInit(&c, MODE_ECB, NULL) < 0)
-		return -1;
-	p = (keyInstance *)sav->sched;
-	if (rijndael_blockEncrypt(&c, &p[1], s, algo->padbound * 8, d) < 0)
-		return -1;
+	struct mbuf *s;
+	struct mbuf *d, *d0, *dp;
+	int soff;	/* offset from the head of chain, to head of this mbuf */
+	int sn, dn;	/* offset from the head of the mbuf, to meat */
+	size_t ivoff, bodyoff;
+	u_int8_t *ivp, *dptr;
+	u_int8_t sbuf[AES_BLOCKLEN], *sp;
+	struct mbuf *scut;
+	int scutoff;
+	int i, len;
+
+	if (ivlen != AES_BLOCKLEN) {
+		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
+		    "unsupported ivlen %d\n", algo->name, ivlen));
+		m_freem(m);
+		return EINVAL;
+	}
+
+	if (sav->flags & SADB_X_EXT_OLD) {
+		/* RFC 1827 */
+		ivoff = off + sizeof(struct esp);
+		bodyoff = off + sizeof(struct esp) + ivlen;
+	} else {
+		ivoff = off + sizeof(struct newesp);
+		bodyoff = off + sizeof(struct newesp) + ivlen;
+	}
+
+	/* put iv into the packet */
+	m_copyback(m, ivoff, ivlen, sav->iv);
+	ivp = (u_int8_t *) sav->iv;
+
+	if (m->m_pkthdr.len < bodyoff) {
+		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
+		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
+		m_freem(m);
+		return EINVAL;
+	}
+	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
+		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
+		    "payload length must be multiple of %d\n",
+		    algo->name, AES_BLOCKLEN));
+		m_freem(m);
+		return EINVAL;
+	}
+
+	s = m;
+	soff = sn = dn = 0;
+	d = d0 = dp = NULL;
+	sp = dptr = NULL;
+
+	/* skip headers/IV */
+	while (soff < bodyoff) {
+		if (soff + s->m_len > bodyoff) {
+			sn = bodyoff - soff;
+			break;
+		}
+
+		soff += s->m_len;
+		s = s->m_next;
+	}
+	scut = s;
+	scutoff = sn;
+
+	/* skip over empty mbuf */
+	while (s && s->m_len == 0)
+		s = s->m_next;
+
+	while (soff < m->m_pkthdr.len) {
+		/* source */
+		if (sn + AES_BLOCKLEN <= s->m_len) {
+			/* body is continuous */
+			sp = mtod(s, u_int8_t *) + sn;
+			len = s->m_len - sn;
+			len -= len % AES_BLOCKLEN;	// full blocks only
+		} else {
+			/* body is non-continuous */
+			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
+			sp = sbuf;
+			len = AES_BLOCKLEN;		// 1 block only in sbuf
+		}
+
+		/* destination */
+		if (!d || dn + AES_BLOCKLEN > d->m_len) {
+			if (d)
+				dp = d;
+			MGET(d, M_DONTWAIT, MT_DATA);
+			i = m->m_pkthdr.len - (soff + sn);
+			if (d && i > MLEN) {
+				MCLGET(d, M_DONTWAIT);
+				if ((d->m_flags & M_EXT) == 0) {
+					d = m_mbigget(d, M_DONTWAIT);
+					if ((d->m_flags & M_EXT) == 0) {
+						m_free(d);
+						d = NULL;
+					}
+				}
+			}
+			if (!d) {
+				m_freem(m);
+				if (d0)
+					m_freem(d0);
+				return ENOBUFS;
+			}
+			if (!d0)
+				d0 = d;
+			if (dp)
+				dp->m_next = d;
+
+			d->m_len = M_TRAILINGSPACE(d);
+			d->m_len -= d->m_len % AES_BLOCKLEN;
+			if (d->m_len > i)
+				d->m_len = i;
+			dptr = mtod(d, u_int8_t *);
+			dn = 0;
+		}
+
+		/* adjust len if greater than space available */
+		if (len > d->m_len - dn)
+			len = d->m_len - dn;
+
+		/* encrypt */
+		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
+		    (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
+
+		/* update offsets */
+		sn += len;
+		dn += len;
+
+		/* next iv */
+		ivp = dptr + dn - AES_BLOCKLEN;	// last block encrypted
+
+		/* find the next source block and skip empty mbufs */
+		while (s && sn >= s->m_len) {
+			sn -= s->m_len;
+			soff += s->m_len;
+			s = s->m_next;
+		}
+
+	}
+
+	/* free unneeded source mbufs and add dest mbufs to chain */
+	m_freem(scut->m_next);
+	scut->m_len = scutoff;
+	scut->m_next = d0;
+
+	/* just in case */
+	bzero(sbuf, sizeof(sbuf));
+
+	key_sa_stir_iv(sav);
+
 	return 0;
 }
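Note the asymmetry with the decrypt path: because each ciphertext block is written to the destination chain, the encrypt side can simply point ivp at the last block it produced instead of copying it. For reference, the same AES-CBC-with-explicit-IV transform can be exercised in user space with CommonCrypto; the sketch below uses throwaway key and IV values and, with padding disabled, is subject to the same multiple-of-16 rule the ipseclog checks above enforce for ESP payloads:

#include <CommonCrypto/CommonCryptor.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char key[kCCKeySizeAES128] = "0123456789abcdef";	/* throwaway key */
	unsigned char iv[16] = "fedcba9876543210";			/* throwaway IV */
	unsigned char pt[32] = "32 bytes: multiple of AES block";
	unsigned char ct[32], out[32];
	size_t moved;

	/* options = 0: CBC mode, no padding, so the input length must be
	 * a multiple of the 16-byte block size, as with ESP above. */
	if (CCCrypt(kCCEncrypt, kCCAlgorithmAES128, 0, key, sizeof(key), iv,
	    pt, sizeof(pt), ct, sizeof(ct), &moved) != kCCSuccess)
		return 1;
	if (CCCrypt(kCCDecrypt, kCCAlgorithmAES128, 0, key, sizeof(key), iv,
	    ct, sizeof(ct), out, sizeof(out), &moved) != kCCSuccess)
		return 1;
	printf("round trip %s\n", memcmp(pt, out, sizeof(pt)) ? "failed" : "ok");
	return 0;
}

This builds with plain cc on OS X; no extra libraries are needed.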
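Finally, the schedlen/schedule pair at the top of the diff follows a simple contract: the algorithm reports the size of its opaque key-schedule state (a single aes_ctx holding both directions, where the rijndael code needed two keyInstances), the key layer allocates sav->sched to that size, and schedule fills it in. A generic userspace sketch of that contract, with illustrative names that are not the kernel's:

#include <stdlib.h>

/* Illustrative model of the esp_algorithm hooks: the algorithm sizes
 * its own scratch state; the caller owns the allocation. */
struct toy_algo {
	int (*schedlen)(void);
	int (*schedule)(const unsigned char *key, int keylen, void *sched);
};

/* Caller-side setup, mirroring how the key layer prepares sav->sched
 * before esp_cbc_encrypt_aes/esp_cbc_decrypt_aes are ever called. */
static void *
toy_setup(const struct toy_algo *a, const unsigned char *key, int keylen)
{
	void *sched = malloc(a->schedlen());

	if (sched == NULL)
		return NULL;
	if (a->schedule(key, keylen, sched) != 0) {
		free(sched);
		return NULL;
	}
	return sched;	/* opaque; only the algorithm looks inside */
}

One consequence visible in the diff: because aes_ctx bundles the encrypt and decrypt schedules, esp_aes_schedlen no longer multiplies by two the way esp_rijndael_schedlen did.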