Move load/store helpers to crypto/internal.h.

We have loads of variations of these. Align them in one set. This avoids
the HOST_* macros defined by md32_common.h, so it'll be a little easier
to make it a more conventional header.
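
For illustration — a minimal standalone sketch, not code from this commit;
the real helpers are added to crypto/internal.h in the diff below and use
memcpy plus a byte swap. The before/after pattern looks like this:

#include <stdint.h>
#include <stdio.h>

/* Self-contained stand-ins for CRYPTO_load_u32_be and CRYPTO_store_u32_le.
 * Byte-by-byte assembly keeps this sketch endian-portable without a bswap
 * helper; it is equivalent for the purposes of the example. */
static uint32_t load_u32_be(const uint8_t *in) {
  return ((uint32_t)in[0] << 24) | ((uint32_t)in[1] << 16) |
         ((uint32_t)in[2] << 8) | (uint32_t)in[3];
}

static void store_u32_le(uint8_t *out, uint32_t v) {
  out[0] = (uint8_t)v;
  out[1] = (uint8_t)(v >> 8);
  out[2] = (uint8_t)(v >> 16);
  out[3] = (uint8_t)(v >> 24);
}

int main(void) {
  const uint8_t data[8] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};

  /* Old style (md32_common.h): HOST_c2l read four bytes and advanced the
   * cursor as a macro side effect:
   *     HOST_c2l(data, l);  X0 = l;
   * New style: a plain function call plus an explicit pointer bump. */
  uint32_t x0 = load_u32_be(data);      /* 0x01234567 */
  uint32_t x1 = load_u32_be(data + 4);  /* 0x89abcdef */

  uint8_t out[4];
  store_u32_le(out, x0);  /* out = {0x67, 0x45, 0x23, 0x01} */

  printf("%08x %08x %02x%02x%02x%02x\n", x0, x1, out[0], out[1], out[2],
         out[3]);
  return 0;
}

The memcpy-based versions in the header avoid alignment and strict-aliasing
pitfalls (as the new comment in crypto/internal.h notes) while still
compiling down to single loads and stores on common targets.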

Change-Id: Id47fe7b51a8f961bd87839f8146d8a5aa8027aa6
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/46425
Reviewed-by: Adam Langley <agl@google.com>
chromium-5359
David Benjamin, committed by Adam Langley
parent 8d4c8fc41b
commit ca4598781a
14 changed files:
crypto/fipsmodule/digest/md32_common.h | 47
crypto/fipsmodule/md4/md4.c | 89
crypto/fipsmodule/md5/md5.c | 89
crypto/fipsmodule/modes/cbc.c | 15
crypto/fipsmodule/modes/cfb.c | 13
crypto/fipsmodule/modes/ctr.c | 11
crypto/fipsmodule/modes/gcm.c | 20
crypto/fipsmodule/modes/internal.h | 21
crypto/fipsmodule/sha/sha1.c | 93
crypto/fipsmodule/sha/sha256.c | 81
crypto/fipsmodule/sha/sha512.c | 58
crypto/internal.h | 52
decrepit/ripemd/internal.h | 28
decrepit/ripemd/ripemd.c | 66

@@ -136,44 +136,6 @@ extern "C" {
#error "HASH_MAKE_STRING must be defined!"
#endif
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
#define HOST_c2l(c, l) \
do { \
(l) = (((uint32_t)(*((c)++))) << 24); \
(l) |= (((uint32_t)(*((c)++))) << 16); \
(l) |= (((uint32_t)(*((c)++))) << 8); \
(l) |= (((uint32_t)(*((c)++)))); \
} while (0)
#define HOST_l2c(l, c) \
do { \
*((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 16) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 8) & 0xff); \
*((c)++) = (uint8_t)(((l)) & 0xff); \
} while (0)
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#define HOST_c2l(c, l) \
do { \
(l) = (((uint32_t)(*((c)++)))); \
(l) |= (((uint32_t)(*((c)++))) << 8); \
(l) |= (((uint32_t)(*((c)++))) << 16); \
(l) |= (((uint32_t)(*((c)++))) << 24); \
} while (0)
#define HOST_l2c(l, c) \
do { \
*((c)++) = (uint8_t)(((l)) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 8) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 16) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
} while (0)
#endif // DATA_ORDER
int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
const uint8_t *data = data_;
@@ -247,13 +209,12 @@ int HASH_FINAL(uint8_t out[HASH_DIGEST_LENGTH], HASH_CTX *c) {
// Append a 64-bit length to the block and process it.
uint8_t *p = c->data + HASH_CBLOCK - 8;
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
HOST_l2c(c->Nh, p);
HOST_l2c(c->Nl, p);
CRYPTO_store_u32_be(p, c->Nh);
CRYPTO_store_u32_be(p + 4, c->Nl);
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
HOST_l2c(c->Nl, p);
HOST_l2c(c->Nh, p);
CRYPTO_store_u32_le(p, c->Nl);
CRYPTO_store_u32_le(p + 4, c->Nh);
#endif
assert(p == c->data + HASH_CBLOCK);
HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
c->num = 0;
OPENSSL_memset(c->data, 0, HASH_CBLOCK);

@@ -92,17 +92,16 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
#define HASH_UPDATE MD4_Update
#define HASH_TRANSFORM MD4_Transform
#define HASH_FINAL MD4_Final
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
#define HASH_MAKE_STRING(c, s) \
do { \
CRYPTO_store_u32_le((s), (c)->h[0]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[1]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[2]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[3]); \
(s) += 4; \
} while (0)
#define HASH_BLOCK_DATA_ORDER md4_block_data_order
@@ -136,7 +135,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
} while (0)
void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
uint32_t A, B, C, D, l;
uint32_t A, B, C, D;
uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15;
A = state[0];
@@ -145,53 +144,53 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
D = state[3];
for (; num--;) {
HOST_c2l(data, l);
X0 = l;
HOST_c2l(data, l);
X1 = l;
X0 = CRYPTO_load_u32_le(data);
data += 4;
X1 = CRYPTO_load_u32_le(data);
data += 4;
// Round 0
R0(A, B, C, D, X0, 3, 0);
HOST_c2l(data, l);
X2 = l;
X2 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X1, 7, 0);
HOST_c2l(data, l);
X3 = l;
X3 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X2, 11, 0);
HOST_c2l(data, l);
X4 = l;
X4 = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X3, 19, 0);
HOST_c2l(data, l);
X5 = l;
X5 = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X4, 3, 0);
HOST_c2l(data, l);
X6 = l;
X6 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X5, 7, 0);
HOST_c2l(data, l);
X7 = l;
X7 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X6, 11, 0);
HOST_c2l(data, l);
X8 = l;
X8 = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X7, 19, 0);
HOST_c2l(data, l);
X9 = l;
X9 = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X8, 3, 0);
HOST_c2l(data, l);
X10 = l;
X10 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X9, 7, 0);
HOST_c2l(data, l);
X11 = l;
X11 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X10, 11, 0);
HOST_c2l(data, l);
X12 = l;
X12 = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X11, 19, 0);
HOST_c2l(data, l);
X13 = l;
X13 = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X12, 3, 0);
HOST_c2l(data, l);
X14 = l;
X14 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X13, 7, 0);
HOST_c2l(data, l);
X15 = l;
X15 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X14, 11, 0);
R0(B, C, D, A, X15, 19, 0);
// Round 1
@@ -252,5 +251,3 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
#undef R0
#undef R1
#undef R2
#undef HOST_c2l
#undef HOST_l2c

@@ -98,17 +98,16 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
#define HASH_UPDATE MD5_Update
#define HASH_TRANSFORM MD5_Transform
#define HASH_FINAL MD5_Final
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
#define HASH_MAKE_STRING(c, s) \
do { \
CRYPTO_store_u32_le((s), (c)->h[0]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[1]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[2]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[3]); \
(s) += 4; \
} while (0)
#define HASH_BLOCK_DATA_ORDER md5_block_data_order
@@ -158,7 +157,7 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
#endif
static void md5_block_data_order(uint32_t *state, const uint8_t *data,
size_t num) {
uint32_t A, B, C, D, l;
uint32_t A, B, C, D;
uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12,
XX13, XX14, XX15;
#define X(i) XX##i
@@ -169,53 +168,53 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
D = state[3];
for (; num--;) {
HOST_c2l(data, l);
X(0) = l;
HOST_c2l(data, l);
X(1) = l;
X(0) = CRYPTO_load_u32_le(data);
data += 4;
X(1) = CRYPTO_load_u32_le(data);
data += 4;
// Round 0
R0(A, B, C, D, X(0), 7, 0xd76aa478L);
HOST_c2l(data, l);
X(2) = l;
X(2) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(1), 12, 0xe8c7b756L);
HOST_c2l(data, l);
X(3) = l;
X(3) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(2), 17, 0x242070dbL);
HOST_c2l(data, l);
X(4) = l;
X(4) = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X(3), 22, 0xc1bdceeeL);
HOST_c2l(data, l);
X(5) = l;
X(5) = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X(4), 7, 0xf57c0fafL);
HOST_c2l(data, l);
X(6) = l;
X(6) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(5), 12, 0x4787c62aL);
HOST_c2l(data, l);
X(7) = l;
X(7) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(6), 17, 0xa8304613L);
HOST_c2l(data, l);
X(8) = l;
X(8) = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X(7), 22, 0xfd469501L);
HOST_c2l(data, l);
X(9) = l;
X(9) = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X(8), 7, 0x698098d8L);
HOST_c2l(data, l);
X(10) = l;
X(10) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(9), 12, 0x8b44f7afL);
HOST_c2l(data, l);
X(11) = l;
X(11) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(10), 17, 0xffff5bb1L);
HOST_c2l(data, l);
X(12) = l;
X(12) = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X(11), 22, 0x895cd7beL);
HOST_c2l(data, l);
X(13) = l;
X(13) = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X(12), 7, 0x6b901122L);
HOST_c2l(data, l);
X(14) = l;
X(14) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(13), 12, 0xfd987193L);
HOST_c2l(data, l);
X(15) = l;
X(15) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(14), 17, 0xa679438eL);
R0(B, C, D, A, X(15), 22, 0x49b40821L);
// Round 1
@@ -297,5 +296,3 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
#undef R1
#undef R2
#undef R3
#undef HOST_c2l
#undef HOST_l2c

@@ -52,6 +52,7 @@
#include <openssl/type_check.h>
#include "internal.h"
#include "../../internal.h"
void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
@@ -68,7 +69,8 @@ void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const uint8_t *iv = ivec;
while (len >= 16) {
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
store_word_le(out + n, load_word_le(in + n) ^ load_word_le(iv + n));
CRYPTO_store_word_le(
out + n, CRYPTO_load_word_le(in + n) ^ CRYPTO_load_word_le(iv + n));
}
(*block)(out, out, key);
iv = out;
@@ -128,7 +130,8 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
while (len >= 16) {
(*block)(in, out, key);
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
store_word_le(out + n, load_word_le(out + n) ^ load_word_le(iv + n));
CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(out + n) ^
CRYPTO_load_word_le(iv + n));
}
iv = in;
len -= 16;
@@ -143,10 +146,10 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
while (len >= 16) {
(*block)(in, tmp.c, key);
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
crypto_word_t c = load_word_le(in + n);
store_word_le(
out + n, tmp.t[n / sizeof(crypto_word_t)] ^ load_word_le(ivec + n));
store_word_le(ivec + n, c);
crypto_word_t c = CRYPTO_load_word_le(in + n);
CRYPTO_store_word_le(out + n, tmp.t[n / sizeof(crypto_word_t)] ^
CRYPTO_load_word_le(ivec + n));
CRYPTO_store_word_le(ivec + n, c);
}
len -= 16;
in += 16;

@@ -73,9 +73,10 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
while (len >= 16) {
(*block)(ivec, ivec, key);
for (; n < 16; n += sizeof(crypto_word_t)) {
crypto_word_t tmp = load_word_le(ivec + n) ^ load_word_le(in + n);
store_word_le(ivec + n, tmp);
store_word_le(out + n, tmp);
crypto_word_t tmp =
CRYPTO_load_word_le(ivec + n) ^ CRYPTO_load_word_le(in + n);
CRYPTO_store_word_le(ivec + n, tmp);
CRYPTO_store_word_le(out + n, tmp);
}
len -= 16;
out += 16;
@@ -102,9 +103,9 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
while (len >= 16) {
(*block)(ivec, ivec, key);
for (; n < 16; n += sizeof(crypto_word_t)) {
crypto_word_t t = load_word_le(in + n);
store_word_le(out + n, load_word_le(ivec + n) ^ t);
store_word_le(ivec + n, t);
crypto_word_t t = CRYPTO_load_word_le(in + n);
CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(ivec + n) ^ t);
CRYPTO_store_word_le(ivec + n, t);
}
len -= 16;
out += 16;

@@ -52,6 +52,7 @@
#include <string.h>
#include "internal.h"
#include "../../internal.h"
// NOTE: the IV/counter CTR mode is big-endian. The code itself
@@ -103,8 +104,8 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
(*block)(ivec, ecount_buf, key);
ctr128_inc(ivec);
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
store_word_le(out + n,
load_word_le(in + n) ^ load_word_le(ecount_buf + n));
CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(in + n) ^
CRYPTO_load_word_le(ecount_buf + n));
}
len -= 16;
out += 16;
@@ -152,7 +153,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
n = (n + 1) % 16;
}
ctr32 = GETU32(ivec + 12);
ctr32 = CRYPTO_load_u32_be(ivec + 12);
while (len >= 16) {
size_t blocks = len / 16;
// 1<<28 is just a not-so-small yet not-so-large number...
@@ -172,7 +173,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
}
(*func)(in, out, blocks, key, ivec);
// (*func) does not update ivec, caller does:
PUTU32(ivec + 12, ctr32);
CRYPTO_store_u32_be(ivec + 12, ctr32);
// ... overflow was detected, propagate carry.
if (ctr32 == 0) {
ctr96_inc(ivec);
@@ -186,7 +187,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
OPENSSL_memset(ecount_buf, 0, 16);
(*func)(ecount_buf, ecount_buf, 1, key, ivec);
++ctr32;
PUTU32(ivec + 12, ctr32);
CRYPTO_store_u32_be(ivec + 12, ctr32);
if (ctr32 == 0) {
ctr96_inc(ivec);
}

@@ -378,8 +378,9 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
store_word_le(out + i, load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;
@@ -395,8 +396,9 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
store_word_le(out + i, load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;
@@ -469,8 +471,9 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
store_word_le(out + i, load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;
@@ -486,8 +489,9 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
store_word_le(out + i, load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;

@@ -64,27 +64,6 @@ extern "C" {
#endif
static inline uint32_t GETU32(const void *in) {
uint32_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return CRYPTO_bswap4(v);
}
static inline void PUTU32(void *out, uint32_t v) {
v = CRYPTO_bswap4(v);
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline crypto_word_t load_word_le(const void *in) {
crypto_word_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return v;
}
static inline void store_word_le(void *out, crypto_word_t v) {
OPENSSL_memcpy(out, &v, sizeof(v));
}
// block128_f is the type of an AES block cipher implementation.
//
// Unlike upstream OpenSSL, it and the other functions in this file hard-code

@@ -88,19 +88,18 @@ uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t out[SHA_DIGEST_LENGTH]) {
#define HASH_CTX SHA_CTX
#define HASH_CBLOCK 64
#define HASH_DIGEST_LENGTH 20
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[4]; \
HOST_l2c(ll, (s)); \
#define HASH_MAKE_STRING(c, s) \
do { \
CRYPTO_store_u32_be((s), (c)->h[0]); \
(s) += 4; \
CRYPTO_store_u32_be((s), (c)->h[1]); \
(s) += 4; \
CRYPTO_store_u32_be((s), (c)->h[2]); \
(s) += 4; \
CRYPTO_store_u32_be((s), (c)->h[3]); \
(s) += 4; \
CRYPTO_store_u32_be((s), (c)->h[4]); \
(s) += 4; \
} while (0)
#define HASH_UPDATE SHA1_Update
@@ -193,7 +192,7 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
#if !defined(SHA1_ASM)
static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
size_t num) {
register uint32_t A, B, C, D, E, T, l;
register uint32_t A, B, C, D, E, T;
uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
XX11, XX12, XX13, XX14, XX15;
@@ -204,52 +203,52 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
E = state[4];
for (;;) {
HOST_c2l(data, l);
X(0) = l;
HOST_c2l(data, l);
X(1) = l;
X(0) = CRYPTO_load_u32_be(data);
data += 4;
X(1) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(0, A, B, C, D, E, T, X(0));
HOST_c2l(data, l);
X(2) = l;
X(2) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(1, T, A, B, C, D, E, X(1));
HOST_c2l(data, l);
X(3) = l;
X(3) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(2, E, T, A, B, C, D, X(2));
HOST_c2l(data, l);
X(4) = l;
X(4) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(3, D, E, T, A, B, C, X(3));
HOST_c2l(data, l);
X(5) = l;
X(5) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(4, C, D, E, T, A, B, X(4));
HOST_c2l(data, l);
X(6) = l;
X(6) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(5, B, C, D, E, T, A, X(5));
HOST_c2l(data, l);
X(7) = l;
X(7) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(6, A, B, C, D, E, T, X(6));
HOST_c2l(data, l);
X(8) = l;
X(8) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(7, T, A, B, C, D, E, X(7));
HOST_c2l(data, l);
X(9) = l;
X(9) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(8, E, T, A, B, C, D, X(8));
HOST_c2l(data, l);
X(10) = l;
X(10) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(9, D, E, T, A, B, C, X(9));
HOST_c2l(data, l);
X(11) = l;
X(11) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(10, C, D, E, T, A, B, X(10));
HOST_c2l(data, l);
X(12) = l;
X(12) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(11, B, C, D, E, T, A, X(11));
HOST_c2l(data, l);
X(13) = l;
X(13) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(12, A, B, C, D, E, T, X(12));
HOST_c2l(data, l);
X(14) = l;
X(14) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(13, T, A, B, C, D, E, X(13));
HOST_c2l(data, l);
X(15) = l;
X(15) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(14, E, T, A, B, C, D, X(14));
BODY_00_15(15, D, E, T, A, B, C, X(15));
@@ -367,5 +366,3 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
#undef BODY_40_59
#undef BODY_60_79
#undef X
#undef HOST_c2l
#undef HOST_l2c

@@ -139,19 +139,18 @@ int SHA224_Final(uint8_t out[SHA224_DIGEST_LENGTH], SHA256_CTX *ctx) {
// hash 'final' function can fail. This should never happen.
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
unsigned int nn; \
switch ((c)->md_len) { \
case SHA224_DIGEST_LENGTH: \
for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) { \
ll = (c)->h[nn]; \
HOST_l2c(ll, (s)); \
CRYPTO_store_u32_be((s), (c)->h[nn]); \
(s) += 4; \
} \
break; \
case SHA256_DIGEST_LENGTH: \
for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) { \
ll = (c)->h[nn]; \
HOST_l2c(ll, (s)); \
CRYPTO_store_u32_be((s), (c)->h[nn]); \
(s) += 4; \
} \
break; \
default: \
@@ -159,8 +158,8 @@ int SHA224_Final(uint8_t out[SHA224_DIGEST_LENGTH], SHA256_CTX *ctx) {
return 0; \
} \
for (nn = 0; nn < (c)->md_len / 4; nn++) { \
ll = (c)->h[nn]; \
HOST_l2c(ll, (s)); \
CRYPTO_store_u32_be((s), (c)->h[nn]); \
(s) += 4; \
} \
break; \
} \
@@ -241,55 +240,53 @@ static void sha256_block_data_order(uint32_t *state, const uint8_t *data,
g = state[6];
h = state[7];
uint32_t l;
HOST_c2l(data, l);
T1 = X[0] = l;
T1 = X[0] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(0, a, b, c, d, e, f, g, h);
HOST_c2l(data, l);
T1 = X[1] = l;
T1 = X[1] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(1, h, a, b, c, d, e, f, g);
HOST_c2l(data, l);
T1 = X[2] = l;
T1 = X[2] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(2, g, h, a, b, c, d, e, f);
HOST_c2l(data, l);
T1 = X[3] = l;
T1 = X[3] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(3, f, g, h, a, b, c, d, e);
HOST_c2l(data, l);
T1 = X[4] = l;
T1 = X[4] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(4, e, f, g, h, a, b, c, d);
HOST_c2l(data, l);
T1 = X[5] = l;
T1 = X[5] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(5, d, e, f, g, h, a, b, c);
HOST_c2l(data, l);
T1 = X[6] = l;
T1 = X[6] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(6, c, d, e, f, g, h, a, b);
HOST_c2l(data, l);
T1 = X[7] = l;
T1 = X[7] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(7, b, c, d, e, f, g, h, a);
HOST_c2l(data, l);
T1 = X[8] = l;
T1 = X[8] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(8, a, b, c, d, e, f, g, h);
HOST_c2l(data, l);
T1 = X[9] = l;
T1 = X[9] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(9, h, a, b, c, d, e, f, g);
HOST_c2l(data, l);
T1 = X[10] = l;
T1 = X[10] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(10, g, h, a, b, c, d, e, f);
HOST_c2l(data, l);
T1 = X[11] = l;
T1 = X[11] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(11, f, g, h, a, b, c, d, e);
HOST_c2l(data, l);
T1 = X[12] = l;
T1 = X[12] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(12, e, f, g, h, a, b, c, d);
HOST_c2l(data, l);
T1 = X[13] = l;
T1 = X[13] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(13, d, e, f, g, h, a, b, c);
HOST_c2l(data, l);
T1 = X[14] = l;
T1 = X[14] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(14, c, d, e, f, g, h, a, b);
HOST_c2l(data, l);
T1 = X[15] = l;
T1 = X[15] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(15, b, c, d, e, f, g, h, a);
for (i = 16; i < 64; i += 8) {
@@ -339,5 +336,3 @@ void SHA256_TransformBlocks(uint32_t state[8], const uint8_t *data,
#undef Maj
#undef ROUND_00_15
#undef ROUND_16_63
#undef HOST_c2l
#undef HOST_l2c

@@ -256,22 +256,8 @@ static int sha512_final_impl(uint8_t *out, SHA512_CTX *sha) {
}
OPENSSL_memset(p + n, 0, sizeof(sha->p) - 16 - n);
p[sizeof(sha->p) - 1] = (uint8_t)(sha->Nl);
p[sizeof(sha->p) - 2] = (uint8_t)(sha->Nl >> 8);
p[sizeof(sha->p) - 3] = (uint8_t)(sha->Nl >> 16);
p[sizeof(sha->p) - 4] = (uint8_t)(sha->Nl >> 24);
p[sizeof(sha->p) - 5] = (uint8_t)(sha->Nl >> 32);
p[sizeof(sha->p) - 6] = (uint8_t)(sha->Nl >> 40);
p[sizeof(sha->p) - 7] = (uint8_t)(sha->Nl >> 48);
p[sizeof(sha->p) - 8] = (uint8_t)(sha->Nl >> 56);
p[sizeof(sha->p) - 9] = (uint8_t)(sha->Nh);
p[sizeof(sha->p) - 10] = (uint8_t)(sha->Nh >> 8);
p[sizeof(sha->p) - 11] = (uint8_t)(sha->Nh >> 16);
p[sizeof(sha->p) - 12] = (uint8_t)(sha->Nh >> 24);
p[sizeof(sha->p) - 13] = (uint8_t)(sha->Nh >> 32);
p[sizeof(sha->p) - 14] = (uint8_t)(sha->Nh >> 40);
p[sizeof(sha->p) - 15] = (uint8_t)(sha->Nh >> 48);
p[sizeof(sha->p) - 16] = (uint8_t)(sha->Nh >> 56);
CRYPTO_store_u64_be(p + sizeof(sha->p) - 16, sha->Nh);
CRYPTO_store_u64_be(p + sizeof(sha->p) - 8, sha->Nl);
sha512_block_data_order(sha->h, p, 1);
@@ -368,12 +354,6 @@ static const uint64_t K512[80] = {
#define ROTR(x, s) (((x) >> s) | (x) << (64 - s))
#endif
static inline uint64_t load_u64_be(const void *ptr) {
uint64_t ret;
OPENSSL_memcpy(&ret, ptr, sizeof(ret));
return CRYPTO_bswap8(ret);
}
#define Sigma0(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
#define Sigma1(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
#define sigma0(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ ((x) >> 7))
@@ -404,7 +384,7 @@ static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
F[7] = state[7];
for (i = 0; i < 16; i++, F--) {
T = load_u64_be(in + i * 8);
T = CRYPTO_load_u64_be(in + i * 8);
F[0] = A;
F[4] = E;
F[8] = T;
@@ -476,37 +456,37 @@ static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
g = state[6];
h = state[7];
T1 = X[0] = load_u64_be(in);
T1 = X[0] = CRYPTO_load_u64_be(in);
ROUND_00_15(0, a, b, c, d, e, f, g, h);
T1 = X[1] = load_u64_be(in + 8);
T1 = X[1] = CRYPTO_load_u64_be(in + 8);
ROUND_00_15(1, h, a, b, c, d, e, f, g);
T1 = X[2] = load_u64_be(in + 2 * 8);
T1 = X[2] = CRYPTO_load_u64_be(in + 2 * 8);
ROUND_00_15(2, g, h, a, b, c, d, e, f);
T1 = X[3] = load_u64_be(in + 3 * 8);
T1 = X[3] = CRYPTO_load_u64_be(in + 3 * 8);
ROUND_00_15(3, f, g, h, a, b, c, d, e);
T1 = X[4] = load_u64_be(in + 4 * 8);
T1 = X[4] = CRYPTO_load_u64_be(in + 4 * 8);
ROUND_00_15(4, e, f, g, h, a, b, c, d);
T1 = X[5] = load_u64_be(in + 5 * 8);
T1 = X[5] = CRYPTO_load_u64_be(in + 5 * 8);
ROUND_00_15(5, d, e, f, g, h, a, b, c);
T1 = X[6] = load_u64_be(in + 6 * 8);
T1 = X[6] = CRYPTO_load_u64_be(in + 6 * 8);
ROUND_00_15(6, c, d, e, f, g, h, a, b);
T1 = X[7] = load_u64_be(in + 7 * 8);
T1 = X[7] = CRYPTO_load_u64_be(in + 7 * 8);
ROUND_00_15(7, b, c, d, e, f, g, h, a);
T1 = X[8] = load_u64_be(in + 8 * 8);
T1 = X[8] = CRYPTO_load_u64_be(in + 8 * 8);
ROUND_00_15(8, a, b, c, d, e, f, g, h);
T1 = X[9] = load_u64_be(in + 9 * 8);
T1 = X[9] = CRYPTO_load_u64_be(in + 9 * 8);
ROUND_00_15(9, h, a, b, c, d, e, f, g);
T1 = X[10] = load_u64_be(in + 10 * 8);
T1 = X[10] = CRYPTO_load_u64_be(in + 10 * 8);
ROUND_00_15(10, g, h, a, b, c, d, e, f);
T1 = X[11] = load_u64_be(in + 11 * 8);
T1 = X[11] = CRYPTO_load_u64_be(in + 11 * 8);
ROUND_00_15(11, f, g, h, a, b, c, d, e);
T1 = X[12] = load_u64_be(in + 12 * 8);
T1 = X[12] = CRYPTO_load_u64_be(in + 12 * 8);
ROUND_00_15(12, e, f, g, h, a, b, c, d);
T1 = X[13] = load_u64_be(in + 13 * 8);
T1 = X[13] = CRYPTO_load_u64_be(in + 13 * 8);
ROUND_00_15(13, d, e, f, g, h, a, b, c);
T1 = X[14] = load_u64_be(in + 14 * 8);
T1 = X[14] = CRYPTO_load_u64_be(in + 14 * 8);
ROUND_00_15(14, c, d, e, f, g, h, a, b);
T1 = X[15] = load_u64_be(in + 15 * 8);
T1 = X[15] = CRYPTO_load_u64_be(in + 15 * 8);
ROUND_00_15(15, b, c, d, e, f, g, h, a);
for (i = 16; i < 80; i += 16) {

@@ -819,6 +819,58 @@ static inline void *OPENSSL_memset(void *dst, int c, size_t n) {
return memset(dst, c, n);
}
// Loads and stores.
//
// The following functions load and store sized integers with the specified
// endianness. They use |memcpy|, and so avoid alignment or strict aliasing
// requirements on the input and output pointers.
static inline uint32_t CRYPTO_load_u32_le(const void *in) {
uint32_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return v;
}
static inline void CRYPTO_store_u32_le(void *out, uint32_t v) {
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline uint32_t CRYPTO_load_u32_be(const void *in) {
uint32_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return CRYPTO_bswap4(v);
}
static inline void CRYPTO_store_u32_be(void *out, uint32_t v) {
v = CRYPTO_bswap4(v);
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline uint64_t CRYPTO_load_u64_be(const void *ptr) {
uint64_t ret;
OPENSSL_memcpy(&ret, ptr, sizeof(ret));
return CRYPTO_bswap8(ret);
}
static inline void CRYPTO_store_u64_be(void *out, uint64_t v) {
v = CRYPTO_bswap8(v);
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline crypto_word_t CRYPTO_load_word_le(const void *in) {
crypto_word_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return v;
}
static inline void CRYPTO_store_word_le(void *out, crypto_word_t v) {
OPENSSL_memcpy(out, &v, sizeof(v));
}
// FIPS functions.
#if defined(BORINGSSL_FIPS)
// BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test
// fails. It prevents any further cryptographic operations by the current

@@ -59,6 +59,8 @@
#include <openssl/base.h>
#include "../../crypto/internal.h"
#if defined(__cplusplus)
extern "C" {
#endif
@@ -76,20 +78,20 @@ static void ripemd160_block_data_order(uint32_t h[5], const uint8_t *data,
#define HASH_UPDATE RIPEMD160_Update
#define HASH_TRANSFORM RIPEMD160_Transform
#define HASH_FINAL RIPEMD160_Final
#define HASH_MAKE_STRING(c, s) \
do { \
unsigned long ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[4]; \
HOST_l2c(ll, (s)); \
#define HASH_MAKE_STRING(c, s) \
do { \
CRYPTO_store_u32_le((s), (c)->h[0]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[1]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[2]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[3]); \
(s) += 4; \
CRYPTO_store_u32_le((s), (c)->h[4]); \
(s) += 4; \
} while (0)
#define HASH_BLOCK_DATA_ORDER ripemd160_block_data_order
#include "../../crypto/fipsmodule/digest/md32_common.h"

@@ -74,7 +74,7 @@ int RIPEMD160_Init(RIPEMD160_CTX *ctx) {
static void ripemd160_block_data_order(uint32_t h[5], const uint8_t *data,
size_t num) {
uint32_t A, B, C, D, E;
uint32_t a, b, c, d, e, l;
uint32_t a, b, c, d, e;
uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12,
XX13, XX14, XX15;
#define X(i) XX##i
@@ -86,52 +86,52 @@ static void ripemd160_block_data_order(uint32_t h[5], const uint8_t *data,
D = h[3];
E = h[4];
HOST_c2l(data, l);
X(0) = l;
HOST_c2l(data, l);
X(1) = l;
X(0) = CRYPTO_load_u32_le(data);
data += 4;
X(1) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(A, B, C, D, E, WL00, SL00);
HOST_c2l(data, l);
X(2) = l;
X(2) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(E, A, B, C, D, WL01, SL01);
HOST_c2l(data, l);
X(3) = l;
X(3) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(D, E, A, B, C, WL02, SL02);
HOST_c2l(data, l);
X(4) = l;
X(4) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(C, D, E, A, B, WL03, SL03);
HOST_c2l(data, l);
X(5) = l;
X(5) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(B, C, D, E, A, WL04, SL04);
HOST_c2l(data, l);
X(6) = l;
X(6) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(A, B, C, D, E, WL05, SL05);
HOST_c2l(data, l);
X(7) = l;
X(7) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(E, A, B, C, D, WL06, SL06);
HOST_c2l(data, l);
X(8) = l;
X(8) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(D, E, A, B, C, WL07, SL07);
HOST_c2l(data, l);
X(9) = l;
X(9) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(C, D, E, A, B, WL08, SL08);
HOST_c2l(data, l);
X(10) = l;
X(10) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(B, C, D, E, A, WL09, SL09);
HOST_c2l(data, l);
X(11) = l;
X(11) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(A, B, C, D, E, WL10, SL10);
HOST_c2l(data, l);
X(12) = l;
X(12) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(E, A, B, C, D, WL11, SL11);
HOST_c2l(data, l);
X(13) = l;
X(13) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(D, E, A, B, C, WL12, SL12);
HOST_c2l(data, l);
X(14) = l;
X(14) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(C, D, E, A, B, WL13, SL13);
HOST_c2l(data, l);
X(15) = l;
X(15) = CRYPTO_load_u32_le(data);
data += 4;
RIP1(B, C, D, E, A, WL14, SL14);
RIP1(A, B, C, D, E, WL15, SL15);

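One more standalone sketch (again not part of the commit): the word-sized
CRYPTO_load_word_le/CRYPTO_store_word_le helpers are plain memcpy wrappers,
which is all the mode code needs — it only uses them to XOR 16-byte blocks
one machine word at a time, as in the cbc.c hunk above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uintptr_t word_t; /* hypothetical stand-in for crypto_word_t */

static word_t load_word_le(const void *in) {
  word_t v;
  memcpy(&v, in, sizeof(v));
  return v;
}

static void store_word_le(void *out, word_t v) {
  memcpy(out, &v, sizeof(v));
}

int main(void) {
  uint8_t in[16], iv[16], out[16];
  memset(in, 0xaa, sizeof(in));
  memset(iv, 0x55, sizeof(iv));

  /* XOR a 16-byte block a word at a time, as CRYPTO_cbc128_encrypt does
   * before calling the block cipher. Byte order is irrelevant for XOR,
   * so a raw memcpy load suffices; sizeof(word_t) is 4 or 8 on the
   * relevant targets, and both divide 16 evenly. */
  for (size_t n = 0; n < 16; n += sizeof(word_t)) {
    store_word_le(out + n, load_word_le(in + n) ^ load_word_le(iv + n));
  }

  for (size_t i = 0; i < 16; i++) {
    printf("%02x", out[i]); /* 0xaa ^ 0x55 = 0xff, sixteen times */
  }
  printf("\n");
  return 0;
}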