From 2dd4e29430c613fcb6a59cbefa3cd919dd4e42d8 Mon Sep 17 00:00:00 2001
From: Mounir IDRASSI
Date: Sat, 6 Sep 2025 00:03:03 +0900
Subject: [PATCH] Fix warning caused by clash between Argon2 AVX2 rotrX macros
 and integer equivalents in VeraCrypt headers

---
 .../Argon2/src/blake2/blamka-round-opt.h      | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/Crypto/Argon2/src/blake2/blamka-round-opt.h b/src/Crypto/Argon2/src/blake2/blamka-round-opt.h
index ab6bce5e..21fa0349 100644
--- a/src/Crypto/Argon2/src/blake2/blamka-round-opt.h
+++ b/src/Crypto/Argon2/src/blake2/blamka-round-opt.h
@@ -183,10 +183,10 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
 
 //#include <immintrin.h>
 
-#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
-#define rotr24(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
-#define rotr16(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
-#define rotr63(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
+#define rotr32_avx2(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
+#define rotr24_avx2(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
+#define rotr16_avx2(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
+#define rotr63_avx2(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
 
 #define G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
     do { \
@@ -194,27 +194,27 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
         ml = _mm256_add_epi64(ml, ml); \
         A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
         D0 = _mm256_xor_si256(D0, A0); \
-        D0 = rotr32(D0); \
+        D0 = rotr32_avx2(D0); \
 \
         ml = _mm256_mul_epu32(C0, D0); \
         ml = _mm256_add_epi64(ml, ml); \
         C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
 \
         B0 = _mm256_xor_si256(B0, C0); \
-        B0 = rotr24(B0); \
+        B0 = rotr24_avx2(B0); \
 \
         ml = _mm256_mul_epu32(A1, B1); \
         ml = _mm256_add_epi64(ml, ml); \
         A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
         D1 = _mm256_xor_si256(D1, A1); \
-        D1 = rotr32(D1); \
+        D1 = rotr32_avx2(D1); \
 \
         ml = _mm256_mul_epu32(C1, D1); \
         ml = _mm256_add_epi64(ml, ml); \
         C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
 \
         B1 = _mm256_xor_si256(B1, C1); \
-        B1 = rotr24(B1); \
+        B1 = rotr24_avx2(B1); \
     } while((void)0, 0);
 
 #define G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
@@ -223,25 +223,25 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
         ml = _mm256_add_epi64(ml, ml); \
         A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
         D0 = _mm256_xor_si256(D0, A0); \
-        D0 = rotr16(D0); \
+        D0 = rotr16_avx2(D0); \
 \
         ml = _mm256_mul_epu32(C0, D0); \
         ml = _mm256_add_epi64(ml, ml); \
         C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
         B0 = _mm256_xor_si256(B0, C0); \
-        B0 = rotr63(B0); \
+        B0 = rotr63_avx2(B0); \
 \
         ml = _mm256_mul_epu32(A1, B1); \
         ml = _mm256_add_epi64(ml, ml); \
         A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
         D1 = _mm256_xor_si256(D1, A1); \
-        D1 = rotr16(D1); \
+        D1 = rotr16_avx2(D1); \
 \
         ml = _mm256_mul_epu32(C1, D1); \
         ml = _mm256_add_epi64(ml, ml); \
         C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
         B1 = _mm256_xor_si256(B1, C1); \
-        B1 = rotr63(B1); \
+        B1 = rotr63_avx2(B1); \
     } while((void)0, 0);
 
 #define DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
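
Note: for readers unfamiliar with the warning being fixed, the sketch below reproduces the clash in miniature. It is not VeraCrypt code: the scalar rotr32 body, the file name clash_sketch.c, and the functions rotr_scalar/rotr_vector are hypothetical stand-ins (VeraCrypt's actual integer rotate macros may differ in shape, e.g. by taking a shift-count argument). Only the rotr32_avx2 definition is taken from the patch above.

/* clash_sketch.c - illustrative sketch of the macro clash this patch fixes.
 * Build: gcc -Wall -mavx2 -c clash_sketch.c   (emits a redefinition warning)
 */
#include <stdint.h>
#include <immintrin.h>

/* Hypothetical stand-in for an integer rotate macro in VeraCrypt's headers:
 * rotate a 64-bit value right by 32. */
#define rotr32(x) (((uint64_t)(x) >> 32) | ((uint64_t)(x) << 32))

uint64_t rotr_scalar(uint64_t v) { return rotr32(v); } /* uses the integer body */

/* Argon2's AVX2 code reused the same name for a __m256i lane rotate; a second
 * #define with a different body draws a compiler warning (GCC/Clang:
 * "rotr32" redefined, MSVC: C4005), and whichever definition is seen last
 * silently wins at later call sites: */
#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))

/* The patch instead suffixes the vector variants, so both rotates coexist: */
#define rotr32_avx2(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))

__m256i rotr_vector(__m256i v) { return rotr32_avx2(v); } /* unambiguous */

Renaming on the Argon2 side keeps the change local to the bundled third-party file, as the diff shows: only blamka-round-opt.h is touched, and VeraCrypt's own headers stay as they are.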