
Fix warning caused by clash between Argon2 AVX2 rotrX macros and integer equivalents in VeraCrypt headers

Author: Mounir IDRASSI
Date: 2025-09-06 00:03:03 +09:00
parent c5589ac4fb
commit 2dd4e29430
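
The warning arises because the C preprocessor sees two different definitions of the same rotr names: the Argon2 AVX2 code defined function-like macros rotr32/rotr24/rotr16/rotr63 for __m256i values, while VeraCrypt's own headers already define integer rotate macros with those names. The sketch below is a simplified, hypothetical reconstruction of the clash (the exact form of the integer macros in the VeraCrypt headers is an assumption); it only illustrates why suffixing the AVX2 variants with _avx2 silences the redefinition warning.

/* Simplified, hypothetical illustration of the name clash; not the actual
 * VeraCrypt headers. Both sides define a macro called rotr32, so whichever
 * definition is seen second triggers a macro-redefinition warning. */
#include <immintrin.h>
#include <stdint.h>

/* Integer-style rotate, roughly as a VeraCrypt header might define it
 * (assumed form for illustration only). */
#define rotr32(x, n) (((uint32_t)(x) >> (n)) | ((uint32_t)(x) << (32 - (n))))

/* Argon2 AVX2 rotate of each 64-bit lane by 32 bits. Before this commit it
 * was also named rotr32, colliding with the macro above; the _avx2 suffix
 * removes the collision without changing the generated code. */
#define rotr32_avx2(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))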


@@ -183,10 +183,10 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
 //#include <immintrin.h>
-#define rotr32(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
-#define rotr24(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
-#define rotr16(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
-#define rotr63(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
+#define rotr32_avx2(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
+#define rotr24_avx2(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10, 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10))
+#define rotr16_avx2(x) _mm256_shuffle_epi8(x, _mm256_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9, 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9))
+#define rotr63_avx2(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))
 #define G1_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
     do { \
@@ -194,27 +194,27 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
         ml = _mm256_add_epi64(ml, ml); \
         A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
         D0 = _mm256_xor_si256(D0, A0); \
-        D0 = rotr32(D0); \
+        D0 = rotr32_avx2(D0); \
 \
         ml = _mm256_mul_epu32(C0, D0); \
         ml = _mm256_add_epi64(ml, ml); \
         C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
 \
         B0 = _mm256_xor_si256(B0, C0); \
-        B0 = rotr24(B0); \
+        B0 = rotr24_avx2(B0); \
 \
         ml = _mm256_mul_epu32(A1, B1); \
         ml = _mm256_add_epi64(ml, ml); \
         A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
         D1 = _mm256_xor_si256(D1, A1); \
-        D1 = rotr32(D1); \
+        D1 = rotr32_avx2(D1); \
 \
         ml = _mm256_mul_epu32(C1, D1); \
         ml = _mm256_add_epi64(ml, ml); \
         C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
 \
         B1 = _mm256_xor_si256(B1, C1); \
-        B1 = rotr24(B1); \
+        B1 = rotr24_avx2(B1); \
     } while((void)0, 0);
 #define G2_AVX2(A0, A1, B0, B1, C0, C1, D0, D1) \
@@ -223,25 +223,25 @@ static BLAKE2_INLINE __m128i fBlaMka(__m128i x, __m128i y) {
         ml = _mm256_add_epi64(ml, ml); \
         A0 = _mm256_add_epi64(A0, _mm256_add_epi64(B0, ml)); \
         D0 = _mm256_xor_si256(D0, A0); \
-        D0 = rotr16(D0); \
+        D0 = rotr16_avx2(D0); \
 \
         ml = _mm256_mul_epu32(C0, D0); \
         ml = _mm256_add_epi64(ml, ml); \
         C0 = _mm256_add_epi64(C0, _mm256_add_epi64(D0, ml)); \
         B0 = _mm256_xor_si256(B0, C0); \
-        B0 = rotr63(B0); \
+        B0 = rotr63_avx2(B0); \
 \
         ml = _mm256_mul_epu32(A1, B1); \
         ml = _mm256_add_epi64(ml, ml); \
         A1 = _mm256_add_epi64(A1, _mm256_add_epi64(B1, ml)); \
         D1 = _mm256_xor_si256(D1, A1); \
-        D1 = rotr16(D1); \
+        D1 = rotr16_avx2(D1); \
 \
         ml = _mm256_mul_epu32(C1, D1); \
         ml = _mm256_add_epi64(ml, ml); \
         C1 = _mm256_add_epi64(C1, _mm256_add_epi64(D1, ml)); \
         B1 = _mm256_xor_si256(B1, C1); \
-        B1 = rotr63(B1); \
+        B1 = rotr63_avx2(B1); \
     } while((void)0, 0);
 #define DIAGONALIZE_1(A0, B0, C0, D0, A1, B1, C1, D1) \
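
The rename is purely cosmetic; a small standalone check like the one below (hypothetical, not part of the commit; it assumes an AVX2-capable compiler invoked with -mavx2) confirms that the renamed vector macros still rotate each 64-bit lane exactly like a plain scalar rotation.

/* Hypothetical standalone check: the renamed AVX2 macros rotate each 64-bit
 * lane exactly like a scalar rotr64 would. Build with: cc -mavx2 test.c */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

#define rotr32_avx2(x) _mm256_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1))
#define rotr63_avx2(x) _mm256_xor_si256(_mm256_srli_epi64((x), 63), _mm256_add_epi64((x), (x)))

/* Scalar reference: rotate a 64-bit word right by c bits (0 < c < 64). */
static uint64_t rotr64(uint64_t v, unsigned c) { return (v >> c) | (v << (64 - c)); }

int main(void) {
    uint64_t in[4] = { 0x0123456789ABCDEFULL, 0x1122334455667788ULL,
                       0xDEADBEEFCAFEBABEULL, 0x0000000100000002ULL };
    uint64_t r32[4], r63[4];

    /* Apply the vector rotations to four 64-bit lanes at once. */
    __m256i v = _mm256_loadu_si256((const __m256i *)in);
    _mm256_storeu_si256((__m256i *)r32, rotr32_avx2(v));
    _mm256_storeu_si256((__m256i *)r63, rotr63_avx2(v));

    /* Compare each lane against the scalar reference rotation. */
    for (int i = 0; i < 4; i++) {
        printf("lane %d: rotr32 %s, rotr63 %s\n", i,
               r32[i] == rotr64(in[i], 32) ? "ok" : "MISMATCH",
               r63[i] == rotr64(in[i], 63) ? "ok" : "MISMATCH");
    }
    return 0;
}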