X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/72b2accb108e3d3ec8113ce2d183638c719a5d0d..f378b41f9ab2493bcbc5892d482b18826b0b84c0:/apt-pkg/contrib/sha2_internal.cc

diff --git a/apt-pkg/contrib/sha2_internal.cc b/apt-pkg/contrib/sha2_internal.cc
index 6d27e8f2b..f70b7b17d 100644
--- a/apt-pkg/contrib/sha2_internal.cc
+++ b/apt-pkg/contrib/sha2_internal.cc
@@ -33,6 +33,7 @@
  */
 
 #include
+#include
 #include <string.h>	/* memcpy()/memset() or bcopy()/bzero() */
 #include <assert.h>	/* assert() */
 #include "sha2_internal.h"
@@ -65,7 +66,7 @@
  * Please make sure that your system defines BYTE_ORDER.  If your
  * architecture is little-endian, make sure it also defines
  * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
- * equivilent.
+ * equivalent.
  *
  * If your system does not define the above, then you can do so by
  * hand like this:
@@ -128,6 +129,14 @@ typedef u_int64_t sha2_word64;	/* Exactly 8 bytes */
 
 /*** ENDIAN REVERSAL MACROS *******************************************/
 #if BYTE_ORDER == LITTLE_ENDIAN
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+#define REVERSE32(w,x) { \
+	(x) = __builtin_bswap32(w); \
+}
+#define REVERSE64(w,x) { \
+	(x) = __builtin_bswap64(w); \
+}
+#else
 #define REVERSE32(w,x)	{ \
 	sha2_word32 tmp = (w); \
 	tmp = (tmp >> 16) | (tmp << 16); \
@@ -141,6 +150,7 @@ typedef u_int64_t sha2_word64;	/* Exactly 8 bytes */
 	(x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
 	      ((tmp & 0x0000ffff0000ffffULL) << 16); \
 }
+#endif
 #endif /* BYTE_ORDER == LITTLE_ENDIAN */
 
 /*
@@ -552,7 +562,9 @@ void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
 	}
 	while (len >= SHA256_BLOCK_LENGTH) {
 		/* Process as many complete blocks as we can */
-		SHA256_Transform(context, (sha2_word32*)data);
+		sha2_byte buffer[SHA256_BLOCK_LENGTH];
+		MEMCPY_BCOPY(buffer, data, SHA256_BLOCK_LENGTH);
+		SHA256_Transform(context, (sha2_word32*)buffer);
 		context->bitcount += SHA256_BLOCK_LENGTH << 3;
 		len -= SHA256_BLOCK_LENGTH;
 		data += SHA256_BLOCK_LENGTH;
@@ -630,7 +642,7 @@ void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
 	}
 
 	/* Clean up state data: */
-	MEMSET_BZERO(context, sizeof(context));
+	MEMSET_BZERO(context, sizeof(*context));
 	usedspace = 0;
 }
 
@@ -651,7 +663,7 @@ char *SHA256_End(SHA256_CTX* context, char buffer[]) {
 		}
 		*buffer = (char)0;
 	} else {
-		MEMSET_BZERO(context, sizeof(context));
+		MEMSET_BZERO(context, sizeof(*context));
 	}
 	MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
 	return buffer;
@@ -879,7 +891,9 @@ void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
 	}
 	while (len >= SHA512_BLOCK_LENGTH) {
 		/* Process as many complete blocks as we can */
-		SHA512_Transform(context, (sha2_word64*)data);
+		sha2_byte buffer[SHA512_BLOCK_LENGTH];
+		MEMCPY_BCOPY(buffer, data, SHA512_BLOCK_LENGTH);
+		SHA512_Transform(context, (sha2_word64*)buffer);
 		ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3);
 		len -= SHA512_BLOCK_LENGTH;
 		data += SHA512_BLOCK_LENGTH;
@@ -965,7 +979,7 @@ void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) {
 	}
 
 	/* Zero out state data */
-	MEMSET_BZERO(context, sizeof(context));
+	MEMSET_BZERO(context, sizeof(*context));
 }
 
 char *SHA512_End(SHA512_CTX* context, char buffer[]) {
@@ -985,7 +999,7 @@ char *SHA512_End(SHA512_CTX* context, char buffer[]) {
 		}
 		*buffer = (char)0;
 	} else {
-		MEMSET_BZERO(context, sizeof(context));
+		MEMSET_BZERO(context, sizeof(*context));
 	}
 	MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
 	return buffer;
@@ -1040,7 +1054,7 @@ void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) {
 	}
 
 	/* Zero out state data */
-	MEMSET_BZERO(context, sizeof(context));
+	MEMSET_BZERO(context, sizeof(*context));
 }
 
 char *SHA384_End(SHA384_CTX* context, char buffer[]) {
@@ -1060,7 +1074,7 @@ char *SHA384_End(SHA384_CTX* context, char buffer[]) {
 		}
 		*buffer = (char)0;
 	} else {
-		MEMSET_BZERO(context, sizeof(context));
+		MEMSET_BZERO(context, sizeof(*context));
 	}
 	MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
 	return buffer;
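
The new REVERSE32/REVERSE64 macros call GCC's __builtin_bswap32 and __builtin_bswap64 (available since GCC 4.3, hence the version guard) instead of the shift-and-mask sequences, which on most targets compiles down to a single byte-swap instruction. Below is a minimal standalone sketch, not taken from the file, showing that the builtin and the generic fallback from the original macros produce the same result; the function names reverse32_generic/reverse64_generic are illustrative only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Portable fallback, same shift/mask scheme as the original REVERSE32 macro. */
static uint32_t reverse32_generic(uint32_t w) {
	w = (w >> 16) | (w << 16);
	return ((w & 0xff00ff00UL) >> 8) | ((w & 0x00ff00ffUL) << 8);
}

/* Portable fallback matching the original REVERSE64 macro. */
static uint64_t reverse64_generic(uint64_t w) {
	w = (w >> 32) | (w << 32);
	w = ((w & 0xff00ff00ff00ff00ULL) >> 8) | ((w & 0x00ff00ff00ff00ffULL) << 8);
	return ((w & 0xffff0000ffff0000ULL) >> 16) | ((w & 0x0000ffff0000ffffULL) << 16);
}

int main(void) {
	uint32_t w32 = 0x11223344UL;
	uint64_t w64 = 0x1122334455667788ULL;
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
	/* On a GCC new enough for the diff's fast path, both forms must agree. */
	assert(__builtin_bswap32(w32) == reverse32_generic(w32));
	assert(__builtin_bswap64(w64) == reverse64_generic(w64));
#endif
	printf("%08x -> %08x\n", (unsigned)w32, (unsigned)reverse32_generic(w32));
	printf("%016llx -> %016llx\n",
	       (unsigned long long)w64, (unsigned long long)reverse64_generic(w64));
	return 0;
}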
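
The SHA256_Update and SHA512_Update hunks stop casting the caller's byte pointer directly to sha2_word32*/sha2_word64*. The input can start at any address, and reading it through a wider word type is undefined behaviour and can fault on strict-alignment architectures; copying each block into a local buffer first (MEMCPY_BCOPY in the diff) sidesteps that. The sketch below shows the same pattern under stated assumptions: process_block, update and BLOCK_LENGTH are hypothetical stand-ins for SHA256_Transform, SHA256_Update and SHA256_BLOCK_LENGTH, not the real API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_LENGTH 64

/* Stand-in for SHA256_Transform: expects a properly aligned word pointer. */
static void process_block(const uint32_t *words) {
	uint32_t sum = 0;
	for (int i = 0; i < BLOCK_LENGTH / 4; i++)
		sum ^= words[i];
	printf("block word xor: %08x\n", (unsigned)sum);
}

static void update(const unsigned char *data, size_t len) {
	while (len >= BLOCK_LENGTH) {
		/* Unsafe alternative: process_block((const uint32_t *)data)
		 * dereferences a possibly misaligned pointer. */
		uint32_t buffer[BLOCK_LENGTH / 4];   /* naturally aligned for uint32_t */
		memcpy(buffer, data, BLOCK_LENGTH);  /* same role as MEMCPY_BCOPY in the diff */
		process_block(buffer);
		data += BLOCK_LENGTH;
		len -= BLOCK_LENGTH;
	}
}

int main(void) {
	unsigned char input[BLOCK_LENGTH + 1] = {0};
	for (int i = 0; i < BLOCK_LENGTH; i++)
		input[i + 1] = (unsigned char)i;
	update(input + 1, BLOCK_LENGTH);         /* deliberately misaligned start */
	return 0;
}

Declaring the temporary as an array of words, as above, additionally guarantees the alignment the transform expects; the diff uses a sha2_byte array, which achieves the copy but leans on the compiler for alignment.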
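
The recurring MEMSET_BZERO change fixes a classic pointer bug: sizeof(context) is the size of the pointer (4 or 8 bytes), not of the SHA*_CTX object it points to, so the Final/End functions were only scrubbing the first few bytes of the hash state. A minimal sketch of the difference; the struct here is an illustrative stand-in, not the real SHA256_CTX layout:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a hash context. */
struct ctx {
	unsigned int state[8];
	unsigned long long bitcount;
	unsigned char buffer[64];
};

static void clear_wrong(struct ctx *context) {
	/* Zeroes only sizeof(struct ctx *) bytes: 4 or 8, not the whole struct. */
	memset(context, 0, sizeof(context));
}

static void clear_right(struct ctx *context) {
	/* Zeroes the full object the pointer refers to. */
	memset(context, 0, sizeof(*context));
}

int main(void) {
	printf("sizeof(pointer) = %zu, sizeof(struct) = %zu\n",
	       sizeof(struct ctx *), sizeof(struct ctx));
	struct ctx c;
	memset(&c, 0xff, sizeof(c));
	clear_wrong(&c);
	printf("after wrong clear, last buffer byte = 0x%02x\n", c.buffer[63]);
	clear_right(&c);
	printf("after right clear, last buffer byte = 0x%02x\n", c.buffer[63]);
	return 0;
}

With the old code most of the intermediate state stayed in memory after SHA*_Final/SHA*_End, which defeats the point of the clean-up these functions promise.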