+/*
+ * Copyright (c) 2008 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
+#include <sys/mcache.h>
#include <kern/locks.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>
-#include <crypto/aes/aes.h>
+#include <libkern/crypto/aes.h>
+
+#include <netkey/key.h>
#include <net/net_osdep.h>
+#define MAX_REALIGN_LEN 2000
#define AES_BLOCKLEN 16
extern lck_mtx_t *sadb_mutex;
int
-esp_aes_schedlen(algo)
- const struct esp_algorithm *algo;
+esp_aes_schedlen(
+ __unused const struct esp_algorithm *algo)
{
return sizeof(aes_ctx);
}
int
-esp_aes_schedule(algo, sav)
- const struct esp_algorithm *algo;
- struct secasvar *sav;
+esp_aes_schedule(
+ __unused const struct esp_algorithm *algo,
+ struct secasvar *sav)
{
+
+ lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
aes_ctx *ctx = (aes_ctx*)sav->sched;
- gen_tabs();
- aes_decrypt_key(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
- aes_encrypt_key(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
+ aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
+ aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);
return 0;
}
int soff; /* offset from the head of chain, to head of this mbuf */
int sn, dn; /* offset from the head of the mbuf, to meat */
size_t ivoff, bodyoff;
- u_int8_t iv[AES_BLOCKLEN], *dptr;
- u_int8_t sbuf[AES_BLOCKLEN], *sp;
+ u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
+ u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
struct mbuf *scut;
int scutoff;
int i, len;
if (m->m_pkthdr.len < bodyoff) {
ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
- algo->name, m->m_pkthdr.len, (unsigned long)bodyoff));
+ algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
m_freem(m);
return EINVAL;
}
}
/* grab iv */
- m_copydata(m, ivoff, ivlen, iv);
+ m_copydata(m, ivoff, ivlen, (caddr_t) iv);
- lck_mtx_unlock(sadb_mutex);
s = m;
soff = sn = dn = 0;
d = d0 = dp = NULL;
len -= len % AES_BLOCKLEN; // full blocks only
} else {
/* body is non-continuous */
- m_copydata(s, sn, AES_BLOCKLEN, sbuf);
+ m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
sp = sbuf;
len = AES_BLOCKLEN; // 1 block only in sbuf
}
if (d && i > MLEN) {
MCLGET(d, M_DONTWAIT);
if ((d->m_flags & M_EXT) == 0) {
- m_free(d);
- d = NULL;
+ d = m_mbigget(d, M_DONTWAIT);
+ if ((d->m_flags & M_EXT) == 0) {
+ m_free(d);
+ d = NULL;
+ }
}
}
if (!d) {
m_freem(m);
if (d0)
m_freem(d0);
- lck_mtx_lock(sadb_mutex);
return ENOBUFS;
}
if (!d0)
d0 = d;
if (dp)
dp->m_next = d;
+
+ // try to make mbuf data aligned
+ if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
+ m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
+ }
+
d->m_len = M_TRAILINGSPACE(d);
d->m_len -= d->m_len % AES_BLOCKLEN;
if (d->m_len > i)
len = d->m_len - dn;
/* decrypt */
+ // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
+ if (IPSEC_IS_P2ALIGNED(sp)) {
+ sp_unaligned = NULL;
+ } else {
+ sp_unaligned = sp;
+ if (len > MAX_REALIGN_LEN) {
+ return ENOBUFS;
+ }
+ if (sp_aligned == NULL) {
+ sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
+ if (sp_aligned == NULL)
+ return ENOMEM;
+ }
+ sp = sp_aligned;
+ memcpy(sp, sp_unaligned, len);
+ }
+ // no need to check output pointer alignment: d->m_data was 4-byte aligned above (m_adj) and dn only advances in AES_BLOCKLEN multiples
aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
(aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt)));
+ // update unaligned pointers
+ if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
+ sp = sp_unaligned;
+ }
+
/* udpate offsets */
sn += len;
dn += len;
scut->m_len = scutoff;
scut->m_next = d0;
+ // free memory
+ if (sp_aligned != NULL) {
+ FREE(sp_aligned, M_SECA);
+ sp_aligned = NULL;
+ }
+
/* just in case */
bzero(iv, sizeof(iv));
bzero(sbuf, sizeof(sbuf));
- lck_mtx_lock(sadb_mutex);
return 0;
}
int
-esp_cbc_encrypt_aes(m, off, plen, sav, algo, ivlen)
- struct mbuf *m;
- size_t off;
- size_t plen;
- struct secasvar *sav;
- const struct esp_algorithm *algo;
- int ivlen;
+esp_cbc_encrypt_aes(
+ struct mbuf *m,
+ size_t off,
+ __unused size_t plen,
+ struct secasvar *sav,
+ const struct esp_algorithm *algo,
+ int ivlen)
{
struct mbuf *s;
struct mbuf *d, *d0, *dp;
- int soff, doff; /* offset from the head of chain, to head of this mbuf */
+ int soff; /* offset from the head of chain, to head of this mbuf */
int sn, dn; /* offset from the head of the mbuf, to meat */
size_t ivoff, bodyoff;
- u_int8_t *ivp, *dptr;
- u_int8_t sbuf[AES_BLOCKLEN], *sp;
+ u_int8_t *ivp, *dptr, *ivp_unaligned;
+ u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
+ u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
struct mbuf *scut;
int scutoff;
int i, len;
/* put iv into the packet */
m_copyback(m, ivoff, ivlen, sav->iv);
- ivp = sav->iv;
+ ivp = (u_int8_t *) sav->iv;
if (m->m_pkthdr.len < bodyoff) {
ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
- algo->name, m->m_pkthdr.len, (unsigned long)bodyoff));
+ algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
m_freem(m);
return EINVAL;
}
m_freem(m);
return EINVAL;
}
- lck_mtx_unlock(sadb_mutex);
s = m;
soff = sn = dn = 0;
len -= len % AES_BLOCKLEN; // full blocks only
} else {
/* body is non-continuous */
- m_copydata(s, sn, AES_BLOCKLEN, sbuf);
+ m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
sp = sbuf;
len = AES_BLOCKLEN; // 1 block only in sbuf
}
if (d && i > MLEN) {
MCLGET(d, M_DONTWAIT);
if ((d->m_flags & M_EXT) == 0) {
- m_free(d);
- d = NULL;
+ d = m_mbigget(d, M_DONTWAIT);
+ if ((d->m_flags & M_EXT) == 0) {
+ m_free(d);
+ d = NULL;
+ }
}
}
if (!d) {
m_freem(m);
if (d0)
m_freem(d0);
- lck_mtx_lock(sadb_mutex);
return ENOBUFS;
}
if (!d0)
if (dp)
dp->m_next = d;
+ // try to make mbuf data aligned
+ if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
+ m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
+ }
+
d->m_len = M_TRAILINGSPACE(d);
d->m_len -= d->m_len % AES_BLOCKLEN;
if (d->m_len > i)
len = d->m_len - dn;
/* encrypt */
+ // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
+ if (IPSEC_IS_P2ALIGNED(sp)) {
+ sp_unaligned = NULL;
+ } else {
+ sp_unaligned = sp;
+ if (len > MAX_REALIGN_LEN) {
+ return ENOBUFS;
+ }
+ if (sp_aligned == NULL) {
+ sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
+ if (sp_aligned == NULL)
+ return ENOMEM;
+ }
+ sp = sp_aligned;
+ memcpy(sp, sp_unaligned, len);
+ }
+ // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
+ if (IPSEC_IS_P2ALIGNED(ivp)) {
+ ivp_unaligned = NULL;
+ } else {
+ ivp_unaligned = ivp;
+ ivp = ivp_aligned_buf;
+ memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
+ }
+ // no need to check output pointer alignment: d->m_data was 4-byte aligned above (m_adj) and dn only advances in AES_BLOCKLEN multiples
aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
(aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt)));
+ // update unaligned pointers
+ if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
+ sp = sp_unaligned;
+ }
+ if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
+ ivp = ivp_unaligned;
+ }
+
/* update offsets */
sn += len;
dn += len;
soff += s->m_len;
s = s->m_next;
}
-
}
/* free un-needed source mbufs and add dest mbufs to chain */
m_freem(scut->m_next);
scut->m_len = scutoff;
scut->m_next = d0;
+
+ // free memory
+ if (sp_aligned != NULL) {
+ FREE(sp_aligned, M_SECA);
+ sp_aligned = NULL;
+ }
/* just in case */
bzero(sbuf, sizeof(sbuf));
- lck_mtx_lock(sadb_mutex);
key_sa_stir_iv(sav);
return 0;