
Windows/Linux/macOS: implement AES hardware support on ARM64 (ARMv8)

commit 54bd819990 (parent c79f8102e0)
Author: Mounir IDRASSI
Date:   2025-01-17 00:58:54 +01:00

22 changed files with 492 additions and 49 deletions

View File

@@ -14,7 +14,7 @@ $(NAME): $(NAME).a
clean:
@echo Cleaning $(NAME)
rm -f $(APPNAME) $(NAME).a $(OBJS) $(OBJSEX) $(OBJSNOOPT) $(OBJSHANI) $(OBJSSSE41) $(OBJSSSSE3) $(OBJS:.o=.d) $(OBJSEX:.oo=.d) $(OBJSNOOPT:.o0=.d) $(OBJSHANI:.oshani=.d) $(OBJSSSE41:.osse41=.d) $(OBJSSSSE3:.ossse3=.d) *.gch
rm -f $(APPNAME) $(NAME).a $(OBJS) $(OBJSEX) $(OBJSNOOPT) $(OBJSHANI) $(OBJSSSE41) $(OBJSSSSE3) $(OBJARMV8CRYPTO) $(OBJS:.o=.d) $(OBJSEX:.oo=.d) $(OBJSNOOPT:.o0=.d) $(OBJSHANI:.oshani=.d) $(OBJSSSE41:.osse41=.d) $(OBJSSSSE3:.ossse3=.d) $(OBJARMV8CRYPTO:.oarmv8crypto=.d) *.gch
%.o: %.c
@echo Compiling $(<F)
@@ -36,6 +36,10 @@ clean:
@echo Compiling $(<F)
$(CC) $(CFLAGS) -mssse3 -c $< -o $@
%.oarmv8crypto: %.c
@echo Compiling $(<F)
$(CC) $(CFLAGS) -march=armv8-a+crypto -c $< -o $@
%.o: %.cpp
@echo Compiling $(<F)
$(CXX) $(CXXFLAGS) -c $< -o $@
@@ -96,10 +100,10 @@ TR_SED_BIN := tr '\n' ' ' | tr -s ' ' ',' | sed -e 's/^,//g' -e 's/,$$/n/' | tr
# Dependencies
-include $(OBJS:.o=.d) $(OBJSEX:.oo=.d) $(OBJSNOOPT:.o0=.d) $(OBJSHANI:.oshani=.d) $(OBJSSSE41:.osse41=.d) $(OBJSSSSE3:.ossse3=.d)
-include $(OBJS:.o=.d) $(OBJSEX:.oo=.d) $(OBJSNOOPT:.o0=.d) $(OBJSHANI:.oshani=.d) $(OBJSSSE41:.osse41=.d) $(OBJSSSSE3:.ossse3=.d) $(OBJARMV8CRYPTO:.oarmv8crypto=.d)
$(NAME).a: $(OBJS) $(OBJSEX) $(OBJSNOOPT) $(OBJSHANI) $(OBJSSSE41) $(OBJSSSSE3)
$(NAME).a: $(OBJS) $(OBJSEX) $(OBJSNOOPT) $(OBJSHANI) $(OBJSSSE41) $(OBJSSSSE3) $(OBJARMV8CRYPTO)
@echo Updating library $@
$(AR) $(AFLAGS) -rc $@ $(OBJS) $(OBJSEX) $(OBJSNOOPT) $(OBJSHANI) $(OBJSSSE41) $(OBJSSSSE3)
$(AR) $(AFLAGS) -rc $@ $(OBJS) $(OBJSEX) $(OBJSNOOPT) $(OBJSHANI) $(OBJSSSE41) $(OBJSSSSE3) $(OBJARMV8CRYPTO)
$(RANLIB) $@
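
The dedicated %.oarmv8crypto rule exists because the AES/NEON intrinsics added later in this commit only compile when the target advertises the ARMv8 Crypto extension. A minimal sketch of the C side of that contract, assuming GCC/Clang semantics for -march=armv8-a+crypto (the flag predefines __ARM_FEATURE_CRYPTO, which the config.h hunk below folds into CRYPTOPP_ARM_AES_AVAILABLE):

/* illustration only: in a translation unit built via the %.oarmv8crypto rule,
   -march=armv8-a+crypto makes GCC/Clang predefine __ARM_FEATURE_CRYPTO, so the
   ARMv8 AES intrinsics (vaeseq_u8, vaesdq_u8, ...) in <arm_neon.h> are usable */
#if defined(__ARM_FEATURE_CRYPTO)
#include <arm_neon.h>
#endif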

View File

@@ -1164,8 +1164,6 @@ BOOL IsAesHwCpuSupported ()
}
return state && !HwEncryptionDisabled;
#elif defined (_M_ARM64) || defined(__arm__) || defined (__arm64__) || defined (__aarch64__)
return 0;
#else
return (HasAESNI() && !HwEncryptionDisabled)? TRUE : FALSE;
#endif
@@ -1483,29 +1481,3 @@ void VcUnprotectKeys (PCRYPTO_INFO pCryptoInfo, uint64 encID)
#endif
#if defined(_M_ARM64) || defined(__arm__) || defined (__arm64__) || defined (__aarch64__)
/* dummy implementation that should never be called */
void aes_hw_cpu_decrypt(const uint8* ks, uint8* data)
{
ks = ks;
data = data;
}
void aes_hw_cpu_decrypt_32_blocks(const uint8* ks, uint8* data)
{
ks = ks;
data = data;
}
void aes_hw_cpu_encrypt(const uint8* ks, uint8* data)
{
ks = ks;
data = data;
}
void aes_hw_cpu_encrypt_32_blocks(const uint8* ks, uint8* data)
{
ks = ks;
data = data;
}
#endif
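
With the ARM64 dummy stubs gone and the early return removed, IsAesHwCpuSupported() can report real hardware support on ARM64. A sketch of the resulting shape (not the verbatim patch), assuming the HasAESNI() -> g_hasAESARM mapping introduced in the cpu.h hunk further down:

#if defined(_M_ARM64) || defined(__arm64__) || defined(__aarch64__)
	/* sketch: on ARMv8 builds HasAESNI() expands to g_hasAESARM,
	   which DetectArmFeatures() fills in at startup */
	return (HasAESNI() && !HwEncryptionDisabled) ? TRUE : FALSE;
#endif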

View File

@@ -1046,7 +1046,7 @@ BOOL IsOSVersionAtLeast (OSVersionEnum reqMinOS, int reqMinServicePack)
>= (major << 16 | minor << 8 | reqMinServicePack));
}
BOOL IsWin10BuildAtLeast(DWORD minBuild)
BOOL IsWin10BuildAtLeast(int minBuild)
{
// Must first be recognized as Windows 10 or higher
if (nCurrentOS < WIN_10)
@@ -14883,6 +14883,7 @@ void GetAppRandomSeed (unsigned char* pbRandSeed, size_t cbRandSeed)
}
}
#ifndef _M_ARM64
// use RDSEED or RDRAND from CPU as source of entropy if enabled
if ( IsCpuRngEnabled() &&
( (HasRDSEED() && RDSEED_getBytes (digest, sizeof (digest)))
@@ -14891,6 +14892,7 @@ void GetAppRandomSeed (unsigned char* pbRandSeed, size_t cbRandSeed)
{
WHIRLPOOL_add (digest, sizeof(digest), &tctx);
}
#endif
WHIRLPOOL_finalize (&tctx, digest);
count = VC_MIN (cbRandSeed, sizeof (digest));

View File

@@ -503,7 +503,7 @@ void Debug (char *format, ...);
void DebugMsgBox (char *format, ...);
BOOL IsOSAtLeast (OSVersionEnum reqMinOS);
BOOL IsOSVersionAtLeast (OSVersionEnum reqMinOS, int reqMinServicePack);
BOOL IsWin10BuildAtLeast(DWORD minBuild);
BOOL IsWin10BuildAtLeast(int minBuild);
BOOL IsSupportedOS ();
BOOL Is64BitOs ();
BOOL IsARM();

View File

@@ -878,6 +878,7 @@ BOOL SlowPoll (void)
}
}
#ifndef _M_ARM64
// use RDSEED or RDRAND from CPU as source of entropy if present
if ( IsCpuRngEnabled() &&
( (HasRDSEED() && RDSEED_getBytes (buffer, sizeof (buffer)))
@@ -886,6 +887,7 @@ BOOL SlowPoll (void)
{
RandaddBuf (buffer, sizeof (buffer));
}
#endif
burn(buffer, sizeof (buffer));
@@ -1011,6 +1013,7 @@ BOOL FastPoll (void)
return FALSE;
}
#ifndef _M_ARM64
// use RDSEED or RDRAND from CPU as source of entropy if enabled
if ( IsCpuRngEnabled() &&
( (HasRDSEED() && RDSEED_getBytes (buffer, sizeof (buffer)))
@@ -1019,6 +1022,7 @@ BOOL FastPoll (void)
{
RandaddBuf (buffer, sizeof (buffer));
}
#endif
burn (buffer, sizeof (buffer));
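
The same guard recurs in Dlgcode.c above and Ntdriver.c below: RDSEED/RDRAND are x86 instructions, so on ARM64 builds the CPU-RNG contribution is compiled out and the entropy pool keeps its other sources. Condensed (only the RDSEED leg shown, matching the excerpt above):

#ifndef _M_ARM64
	/* x86/x64 only: fold CPU RNG output into the pool when enabled */
	if ( IsCpuRngEnabled() &&
		 (HasRDSEED() && RDSEED_getBytes (buffer, sizeof (buffer))) )
	{
		RandaddBuf (buffer, sizeof (buffer));
	}
#endif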

View File

@@ -1490,7 +1490,9 @@ BOOL AutoTestAlgorithms (void)
{
/* unexpected exception raised. Disable all CPU extended features and try again */
EnableHwEncryption (hwEncryptionEnabled);
#ifndef _M_ARM64
DisableCPUExtendedFeatures ();
#endif
__try
{
result = DoAutoTestAlgorithms();

src/Crypto/Aes_hw_armv8.c (new file, 316 lines)
View File

@@ -0,0 +1,316 @@
/*
* AES using ARMv8
* Contributed by Jeffrey Walton
*
* Further changes
* (C) 2017,2018 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/
/* Modified and adapted for VeraCrypt */
#include "Common/Tcdefs.h"
#include "Aes_hw_cpu.h"
#if !defined(_UEFI)
#include <memory.h>
#include <stdlib.h>
#endif
#include "cpu.h"
#include "misc.h"
#if CRYPTOPP_ARM_AES_AVAILABLE
#include <arm_neon.h>
// Single block encryption operations
VC_INLINE void aes_enc_block(uint8x16_t* B, uint8x16_t K)
{
*B = vaesmcq_u8(vaeseq_u8(*B, K));
}
VC_INLINE void aes_enc_block_last(uint8x16_t* B, uint8x16_t K, uint8x16_t K2)
{
*B = veorq_u8(vaeseq_u8(*B, K), K2);
}
// 4-block parallel encryption operations
VC_INLINE void aes_enc_4_blocks(uint8x16_t* B0, uint8x16_t* B1,
uint8x16_t* B2, uint8x16_t* B3, uint8x16_t K)
{
*B0 = vaesmcq_u8(vaeseq_u8(*B0, K));
*B1 = vaesmcq_u8(vaeseq_u8(*B1, K));
*B2 = vaesmcq_u8(vaeseq_u8(*B2, K));
*B3 = vaesmcq_u8(vaeseq_u8(*B3, K));
}
VC_INLINE void aes_enc_4_blocks_last(uint8x16_t* B0, uint8x16_t* B1,
uint8x16_t* B2, uint8x16_t* B3,
uint8x16_t K, uint8x16_t K2)
{
*B0 = veorq_u8(vaeseq_u8(*B0, K), K2);
*B1 = veorq_u8(vaeseq_u8(*B1, K), K2);
*B2 = veorq_u8(vaeseq_u8(*B2, K), K2);
*B3 = veorq_u8(vaeseq_u8(*B3, K), K2);
}
// Single block decryption operations
VC_INLINE void aes_dec_block(uint8x16_t* B, uint8x16_t K)
{
*B = vaesimcq_u8(vaesdq_u8(*B, K));
}
VC_INLINE void aes_dec_block_last(uint8x16_t* B, uint8x16_t K, uint8x16_t K2)
{
*B = veorq_u8(vaesdq_u8(*B, K), K2);
}
// 4-block parallel decryption operations
VC_INLINE void aes_dec_4_blocks(uint8x16_t* B0, uint8x16_t* B1,
uint8x16_t* B2, uint8x16_t* B3, uint8x16_t K)
{
*B0 = vaesimcq_u8(vaesdq_u8(*B0, K));
*B1 = vaesimcq_u8(vaesdq_u8(*B1, K));
*B2 = vaesimcq_u8(vaesdq_u8(*B2, K));
*B3 = vaesimcq_u8(vaesdq_u8(*B3, K));
}
VC_INLINE void aes_dec_4_blocks_last(uint8x16_t* B0, uint8x16_t* B1,
uint8x16_t* B2, uint8x16_t* B3,
uint8x16_t K, uint8x16_t K2)
{
*B0 = veorq_u8(vaesdq_u8(*B0, K), K2);
*B1 = veorq_u8(vaesdq_u8(*B1, K), K2);
*B2 = veorq_u8(vaesdq_u8(*B2, K), K2);
*B3 = veorq_u8(vaesdq_u8(*B3, K), K2);
}
VC_INLINE void aes256_hw_encrypt_blocks(uint8 buffer[], size_t blocks, const uint8* ks)
{
const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
while(blocks >= 4) {
uint8x16_t B0 = vld1q_u8(buffer);
uint8x16_t B1 = vld1q_u8(buffer + 16);
uint8x16_t B2 = vld1q_u8(buffer + 32);
uint8x16_t B3 = vld1q_u8(buffer + 48);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K0);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K1);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K2);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K3);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K4);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K5);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K6);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K7);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K8);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K9);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K10);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K11);
aes_enc_4_blocks(&B0, &B1, &B2, &B3, K12);
aes_enc_4_blocks_last(&B0, &B1, &B2, &B3, K13, K14);
vst1q_u8(buffer, B0);
vst1q_u8(buffer + 16, B1);
vst1q_u8(buffer + 32, B2);
vst1q_u8(buffer + 48, B3);
buffer += 16 * 4;
blocks -= 4;
}
for(size_t i = 0; i != blocks; ++i) {
uint8x16_t B = vld1q_u8(buffer + 16 * i);
aes_enc_block(&B, K0);
aes_enc_block(&B, K1);
aes_enc_block(&B, K2);
aes_enc_block(&B, K3);
aes_enc_block(&B, K4);
aes_enc_block(&B, K5);
aes_enc_block(&B, K6);
aes_enc_block(&B, K7);
aes_enc_block(&B, K8);
aes_enc_block(&B, K9);
aes_enc_block(&B, K10);
aes_enc_block(&B, K11);
aes_enc_block(&B, K12);
aes_enc_block_last(&B, K13, K14);
vst1q_u8(buffer + 16 * i, B);
}
}
VC_INLINE void aes256_hw_encrypt_block(uint8 buffer[], const uint8* ks)
{
const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
uint8x16_t B = vld1q_u8(buffer);
aes_enc_block(&B, K0);
aes_enc_block(&B, K1);
aes_enc_block(&B, K2);
aes_enc_block(&B, K3);
aes_enc_block(&B, K4);
aes_enc_block(&B, K5);
aes_enc_block(&B, K6);
aes_enc_block(&B, K7);
aes_enc_block(&B, K8);
aes_enc_block(&B, K9);
aes_enc_block(&B, K10);
aes_enc_block(&B, K11);
aes_enc_block(&B, K12);
aes_enc_block_last(&B, K13, K14);
vst1q_u8(buffer, B);
}
VC_INLINE void aes256_hw_decrypt_blocks(uint8 buffer[], size_t blocks, const uint8* ks)
{
const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
while(blocks >= 4) {
uint8x16_t B0 = vld1q_u8(buffer);
uint8x16_t B1 = vld1q_u8(buffer + 16);
uint8x16_t B2 = vld1q_u8(buffer + 32);
uint8x16_t B3 = vld1q_u8(buffer + 48);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K0);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K1);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K2);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K3);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K4);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K5);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K6);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K7);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K8);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K9);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K10);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K11);
aes_dec_4_blocks(&B0, &B1, &B2, &B3, K12);
aes_dec_4_blocks_last(&B0, &B1, &B2, &B3, K13, K14);
vst1q_u8(buffer, B0);
vst1q_u8(buffer + 16, B1);
vst1q_u8(buffer + 32, B2);
vst1q_u8(buffer + 48, B3);
buffer += 16 * 4;
blocks -= 4;
}
for(size_t i = 0; i != blocks; ++i) {
uint8x16_t B = vld1q_u8(buffer + 16 * i);
aes_dec_block(&B, K0);
aes_dec_block(&B, K1);
aes_dec_block(&B, K2);
aes_dec_block(&B, K3);
aes_dec_block(&B, K4);
aes_dec_block(&B, K5);
aes_dec_block(&B, K6);
aes_dec_block(&B, K7);
aes_dec_block(&B, K8);
aes_dec_block(&B, K9);
aes_dec_block(&B, K10);
aes_dec_block(&B, K11);
aes_dec_block(&B, K12);
aes_dec_block_last(&B, K13, K14);
vst1q_u8(buffer + 16 * i, B);
}
}
VC_INLINE void aes256_hw_decrypt_block(uint8 buffer[], const uint8* ks)
{
const uint8x16_t K0 = vld1q_u8(ks + 0 * 16);
const uint8x16_t K1 = vld1q_u8(ks + 1 * 16);
const uint8x16_t K2 = vld1q_u8(ks + 2 * 16);
const uint8x16_t K3 = vld1q_u8(ks + 3 * 16);
const uint8x16_t K4 = vld1q_u8(ks + 4 * 16);
const uint8x16_t K5 = vld1q_u8(ks + 5 * 16);
const uint8x16_t K6 = vld1q_u8(ks + 6 * 16);
const uint8x16_t K7 = vld1q_u8(ks + 7 * 16);
const uint8x16_t K8 = vld1q_u8(ks + 8 * 16);
const uint8x16_t K9 = vld1q_u8(ks + 9 * 16);
const uint8x16_t K10 = vld1q_u8(ks + 10 * 16);
const uint8x16_t K11 = vld1q_u8(ks + 11 * 16);
const uint8x16_t K12 = vld1q_u8(ks + 12 * 16);
const uint8x16_t K13 = vld1q_u8(ks + 13 * 16);
const uint8x16_t K14 = vld1q_u8(ks + 14 * 16);
uint8x16_t B = vld1q_u8(buffer);
aes_dec_block(&B, K0);
aes_dec_block(&B, K1);
aes_dec_block(&B, K2);
aes_dec_block(&B, K3);
aes_dec_block(&B, K4);
aes_dec_block(&B, K5);
aes_dec_block(&B, K6);
aes_dec_block(&B, K7);
aes_dec_block(&B, K8);
aes_dec_block(&B, K9);
aes_dec_block(&B, K10);
aes_dec_block(&B, K11);
aes_dec_block(&B, K12);
aes_dec_block_last(&B, K13, K14);
vst1q_u8(buffer, B);
}
void aes_hw_cpu_decrypt (const uint8 *ks, uint8 *data)
{
aes256_hw_decrypt_block(data, ks);
}
void aes_hw_cpu_decrypt_32_blocks (const uint8 *ks, uint8 *data)
{
aes256_hw_decrypt_blocks(data, 32, ks);
}
void aes_hw_cpu_encrypt (const uint8 *ks, uint8 *data)
{
aes256_hw_encrypt_block(data, ks);
}
void aes_hw_cpu_encrypt_32_blocks (const uint8 *ks, uint8 *data)
{
aes256_hw_encrypt_blocks(data, 32, ks);
}
#endif
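
The exported entry points keep the existing aes_hw_cpu_* interface, so existing callers are unchanged. A usage sketch for one 512-byte data unit, assuming (as the code above does) that ks points to the 240-byte AES-256 key schedule laid out as 15 consecutive 16-byte round keys, with encryption and decryption each using their own schedule:

/* hypothetical caller, illustration only */
void encrypt_data_unit_example (uint8 sector[512], const uint8 enc_ks[240])
{
	/* 512 bytes = 32 AES blocks; processed four blocks at a time internally */
	aes_hw_cpu_encrypt_32_blocks (enc_ks, sector);
}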

View File

@@ -22,8 +22,8 @@ extern "C"
#if defined (TC_WINDOWS_BOOT)
uint8 is_aes_hw_cpu_supported ();
#endif
void aes_hw_cpu_enable_sse ();
#endif
void aes_hw_cpu_decrypt (const uint8 *ks, uint8 *data);
void VC_CDECL aes_hw_cpu_decrypt_32_blocks (const uint8 *ks, uint8 *data);
void aes_hw_cpu_encrypt (const uint8 *ks, uint8 *data);

View File

@@ -226,6 +226,10 @@
</ClCompile>
<ClCompile Include="Aeskey.c" />
<ClCompile Include="Aestab.c" />
<ClCompile Include="Aes_hw_armv8.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="blake2s.c" />
<ClCompile Include="blake2s_SSE2.c" />
<ClCompile Include="blake2s_SSE41.c" />

View File

@@ -90,6 +90,12 @@
<ClCompile Include="Sha2Intel.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Aescrypt.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Aes_hw_armv8.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Aes.h">
@@ -167,15 +173,6 @@
<ClInclude Include="t1ha_selfcheck.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="blake2s-load-sse2.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="blake2s-load-sse41.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="blake2s-round.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<CustomBuild Include="Aes_hw_cpu.asm">

View File

@@ -29,6 +29,11 @@
#define CRYPTOPP_CLANG_INTEGRATED_ASSEMBLER 1
#endif
#if defined(_MSC_VER) && !defined(__clang__)
# undef CRYPTOPP_LLVM_CLANG_VERSION
# define CRYPTOPP_MSC_VERSION (_MSC_VER)
#endif
// Clang due to "Inline assembly operands don't work with .intel_syntax", http://llvm.org/bugs/show_bug.cgi?id=24232
// TODO: supply the upper version when LLVM fixes it. We set it to 20.0 for compilation purposes.
#if (defined(CRYPTOPP_LLVM_CLANG_VERSION) && CRYPTOPP_LLVM_CLANG_VERSION <= 200000) || (defined(CRYPTOPP_APPLE_CLANG_VERSION) && CRYPTOPP_APPLE_CLANG_VERSION <= 200000) || defined(CRYPTOPP_CLANG_INTEGRATED_ASSEMBLER)
@@ -201,6 +206,40 @@
#define CRYPTOPP_BOOL_X64 0
#endif
#if defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64)
#define CRYPTOPP_BOOL_ARMV8 1
#define CRYPTOPP_BOOL_ARM64 1
#else
#define CRYPTOPP_BOOL_ARMV8 0
#define CRYPTOPP_BOOL_ARM64 0
#endif
// ARMv8 and ASIMD. -march=armv8-a or above must be present
// Requires GCC 4.8, Clang 3.3 or Visual Studio 2017
// Do not use APPLE_CLANG_VERSION; use __ARM_FEATURE_XXX instead.
#if !defined(CRYPTOPP_ARM_ASIMD_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ARM_ASIMD)
# if defined(__aarch32__) || defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64)
# if defined(__ARM_NEON) || defined(__ARM_ASIMD) || defined(__ARM_FEATURE_NEON) || defined(__ARM_FEATURE_ASIMD) || \
(CRYPTOPP_GCC_VERSION >= 40800) || (CRYPTOPP_LLVM_CLANG_VERSION >= 30300) || \
(CRYPTOPP_APPLE_CLANG_VERSION >= 40000) || (CRYPTOPP_MSC_VERSION >= 1916)
# define CRYPTOPP_ARM_NEON_AVAILABLE 1
# define CRYPTOPP_ARM_ASIMD_AVAILABLE 1
# endif // Compilers
# endif // Platforms
#endif
// ARMv8 and AES. -march=armv8-a+crypto or above must be present
// Requires GCC 4.8, Clang 3.3 or Visual Studio 2017
#if !defined(CRYPTOPP_ARM_AES_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ARM_AES)
# if defined(__aarch32__) || defined(__aarch64__) || defined(_M_ARM64)
# if defined(__ARM_FEATURE_CRYPTO) || (CRYPTOPP_GCC_VERSION >= 40800) || \
(CRYPTOPP_LLVM_CLANG_VERSION >= 30300) || (CRYPTOPP_APPLE_CLANG_VERSION >= 40300) || \
(CRYPTOPP_MSC_VERSION >= 1916)
# define CRYPTOPP_ARM_AES_AVAILABLE 1
# endif // Compilers
# endif // Platforms
#endif
// Undo the ASM and Intrinsic related defines due to X32.
#if CRYPTOPP_BOOL_X32
# undef CRYPTOPP_BOOL_X64

View File

@@ -469,3 +469,41 @@ void DisableCPUExtendedFeatures ()
#endif
#if CRYPTOPP_BOOL_ARMV8
volatile int g_hasAESARM = 0;
#ifndef HWCAP_AES
# define HWCAP_AES (1 << 3)
#endif
inline int CPU_QueryAES()
{
#if defined(CRYPTOPP_ARM_AES_AVAILABLE)
#if defined(__linux__) && defined(__aarch64__)
if ((getauxval(AT_HWCAP) & HWCAP_AES) != 0)
return 1;
#elif defined(__APPLE__) && defined(__aarch64__)
// Apple Silicon (M1) and later
return 1;
#elif defined(_WIN32) && defined(_M_ARM64)
#ifdef TC_WINDOWS_DRIVER
if (ExIsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0)
return 1;
#else
if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0)
return 1;
#endif
#endif
return 0;
#else
return 0;
#endif
}
void DetectArmFeatures()
{
g_hasAESARM = CPU_QueryAES();
}
#endif
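
Run-time detection is kept separate from compile-time availability: CPU_QueryAES() asks the OS (getauxval on Linux, IsProcessorFeaturePresent/ExIsProcessorFeaturePresent on Windows, always true on Apple Silicon) and caches the answer in g_hasAESARM. A minimal sketch of the expected calling pattern, assuming DetectArmFeatures() runs once at startup as the DriverEntry and wWinMain hunks below do:

#if CRYPTOPP_BOOL_ARMV8
	DetectArmFeatures ();      /* fills g_hasAESARM once */
	if (g_hasAESARM)           /* i.e. HasAESNI() on ARMv8 builds */
	{
		/* safe to dispatch to the Aes_hw_armv8.c routines */
	}
#endif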

View File

@@ -288,6 +288,24 @@ void DisableCPUExtendedFeatures ();
}
#endif
#elif CRYPTOPP_BOOL_ARMV8
#if defined(__cplusplus)
extern "C" {
#endif
#if !defined(CRYPTOPP_DISABLE_AESNI) && !defined(WOLFCRYPT_BACKEND)
#define TC_AES_HW_CPU
#endif
extern volatile int g_hasAESARM;
void DetectArmFeatures();
#define HasAESNI() g_hasAESARM
#if defined(__cplusplus)
}
#endif
#else
#define HasSSE2() 0

View File

@@ -228,6 +228,10 @@ copy $(OutDir)veracrypt.inf "$(SolutionDir)Debug\Setup Files\veracrypt.inf"</Com
</ClCompile>
<ClCompile Include="..\Crypto\Aeskey.c" />
<ClCompile Include="..\Crypto\Aestab.c" />
<ClCompile Include="..\Crypto\Aes_hw_armv8.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="..\Crypto\blake2s.c" />
<ClCompile Include="..\Crypto\blake2s_SSE2.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">true</ExcludedFromBuild>

View File

@@ -165,6 +165,9 @@
<ClCompile Include="..\Driver\VolumeFilter.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\Crypto\Aes_hw_armv8.c">
<Filter>Crypto\Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\Common\Tcdefs.h">

View File

@@ -232,7 +232,7 @@ void GetDriverRandomSeed (unsigned char* pbRandSeed, size_t cbRandSeed)
jent_entropy_collector_free (ec);
}
}
#ifndef _M_ARM64
// use RDSEED or RDRAND from CPU as source of entropy if enabled
if ( IsCpuRngEnabled() &&
( (HasRDSEED() && RDSEED_getBytes (digest, sizeof (digest)))
@@ -241,6 +241,7 @@ void GetDriverRandomSeed (unsigned char* pbRandSeed, size_t cbRandSeed)
{
WHIRLPOOL_add (digest, sizeof(digest), &tctx);
}
#endif
WHIRLPOOL_finalize (&tctx, digest);
count = VC_MIN (cbRandSeed, sizeof (digest));
@@ -266,7 +267,11 @@ NTSTATUS DriverEntry(PDRIVER_OBJECT DriverObject, PUNICODE_STRING RegistryPath)
Dump("DriverEntry " TC_APP_NAME " " VERSION_STRING VERSION_STRING_SUFFIX "\n");
#ifndef _M_ARM64
DetectX86Features();
#else
DetectArmFeatures();
#endif
PsGetVersion(&OsMajorVersion, &OsMinorVersion, NULL, NULL);
@@ -293,7 +298,11 @@ NTSTATUS DriverEntry(PDRIVER_OBJECT DriverObject, PUNICODE_STRING RegistryPath)
{
// in case of system encryption, if self-tests fail, disable all extended CPU
// features and try again in order to workaround faulty configurations
#ifndef _M_ARM64
DisableCPUExtendedFeatures();
#else
EnableHwEncryption(FALSE);
#endif
SelfTestsPassed = AutoTestAlgorithms();
// BUG CHECK if the self-tests still fail

View File

@@ -1093,7 +1093,11 @@ int WINAPI wWinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance, wchar_t *lpsz
/* application title */
lpszTitle = L"VeraCrypt Expander";
#ifndef _M_ARM64
DetectX86Features();
#else
DetectArmFeatures();
#endif
status = DriverAttach ();
if (status != 0)

View File

@@ -10588,7 +10588,11 @@ int WINAPI wWinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance, wchar_t *lpsz
VirtualLock (&szFileName, sizeof(szFileName));
VirtualLock (&szDiskFile, sizeof(szDiskFile));
#ifndef _M_ARM64
DetectX86Features();
#else
DetectArmFeatures();
#endif
try
{

View File

@@ -555,6 +555,9 @@ namespace VeraCrypt
#ifdef CRYPTOPP_CPUID_AVAILABLE
DetectX86Features ();
#endif
#if CRYPTOPP_BOOL_ARMV8
DetectArmFeatures();
#endif
LangString.Init();
Core->Init();

View File

@@ -162,6 +162,9 @@ else ifneq (,$(filter x86_64 x86-64 amd64 x64,$(ARCH)))
else ifneq (,$(filter armv7l,$(ARCH)))
PLATFORM_ARCH := armv7
CPU_ARCH = armv7
else ifneq (,$(filter aarch64 arm64 armv8l,$(ARCH)))
PLATFORM_ARCH := arm64
CPU_ARCH = arm64
endif
ifeq "$(origin NOASM)" "command line"
@@ -338,6 +341,9 @@ $(error Specified SDK version was not found, ensure your active developer direct
VC_FUSE_PACKAGE := fuse-t
endif
export CFLAGS_ARM64 := $(CFLAGS) $(C_CXX_FLAGS) -arch arm64 -march=armv8-a+crypto
export CFLAGS_X64 := $(CFLAGS) $(C_CXX_FLAGS) -arch x86_64
# Set x86 assembly flags (-msse2, -mssse3, -msse4.1)
# Apply flags if SIMD_SUPPORTED is 1 or if not in local development build (we are creating universal binary in this case)
ifneq "$(LOCAL_DEVELOPMENT_BUILD)" "true"

View File

@@ -10132,7 +10132,11 @@ int WINAPI wWinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance, wchar_t *lpsz
VirtualLock (&defaultMountOptions, sizeof (defaultMountOptions));
VirtualLock (&szFileName, sizeof(szFileName));
#ifndef _M_ARM64
DetectX86Features();
#else
DetectArmFeatures();
#endif
try
{

View File

@@ -43,6 +43,7 @@ ifeq "$(PLATFORM)" "MacOSX"
ifneq "$(COMPILE_ASM)" "false"
OBJSEX += ../Crypto/Aes_asm.oo
OBJS += ../Crypto/Aes_hw_cpu.o
OBJSEX += ../Crypto/Aes_hw_armv8.oo
OBJS += ../Crypto/Aescrypt.o
OBJSEX += ../Crypto/Twofish_asm.oo
OBJSEX += ../Crypto/Camellia_asm.oo
@@ -78,6 +79,9 @@ else ifeq "$(CPU_ARCH)" "x64"
OBJS += ../Crypto/sha512_avx1_x64.o
OBJS += ../Crypto/sha512_avx2_x64.o
OBJS += ../Crypto/sha512_sse4_x64.o
else ifeq "$(CPU_ARCH)" "arm64"
OBJARMV8CRYPTO += ../Crypto/Aes_hw_armv8.oarmv8crypto
OBJS += ../Crypto/Aescrypt.o
else
OBJS += ../Crypto/Aescrypt.o
endif
@@ -140,6 +144,12 @@ VolumeLibrary: Volume.a
ifeq "$(ENABLE_WOLFCRYPT)" "0"
ifeq "$(PLATFORM)" "MacOSX"
ifneq "$(COMPILE_ASM)" "false"
../Crypto/Aes_hw_armv8.oo: ../Crypto/Aes_hw_armv8.c
@echo Compiling $(<F)
$(CC) $(CFLAGS_ARM64) -c ../Crypto/Aes_hw_armv8.c -o ../Crypto/Aes_hw_armv8_arm64.o
$(CC) $(CFLAGS_X64) -c ../Crypto/Aes_hw_armv8.c -o ../Crypto/Aes_hw_armv8_x64.o
lipo -create ../Crypto/Aes_hw_armv8_arm64.o ../Crypto/Aes_hw_armv8_x64.o -output ../Crypto/Aes_hw_armv8.oo
rm -fr ../Crypto/Aes_hw_armv8_arm64.o ../Crypto/Aes_hw_armv8_x64.o
../Crypto/Aes_asm.oo: ../Crypto/Aes_x86.asm ../Crypto/Aes_x64.asm
@echo Assembling $(<F)
$(AS) $(ASFLAGS32) -o ../Crypto/Aes_x86.o ../Crypto/Aes_x86.asm