From dee49241f90c1422460d7198f2e65c5edc2f1024 Mon Sep 17 00:00:00 2001 From: zhsnew Date: Wed, 13 Nov 2024 15:11:59 +0800 Subject: [PATCH 1/3] White-box SM4, wbsm4kdf, cipher: wbsm4-xiaolai, wbsm4-baiwu, wbsm4-wsise --- Configure | 3 + apps/enc.c | 58 + apps/kdf.c | 5 + apps/speed.c | 138 + crypto/evp/build.info | 1 + crypto/evp/c_allc.c | 32 + crypto/evp/e_wbsm4_baiwu.c | 749 +++ crypto/evp/e_wbsm4_wsise.c | 749 +++ crypto/evp/e_wbsm4_xiaolai.c | 749 +++ crypto/objects/obj_dat.h | 69 +- crypto/objects/obj_mac.num | 21 + crypto/objects/objects.txt | 31 + crypto/sm4/build.info | 9 + crypto/sm4/wb/Bai-Wu-wbsm4.c | 336 ++ crypto/sm4/wb/WBMatrix.c | 5364 +++++++++++++++++ crypto/sm4/wb/WBMatrix.h | 314 + crypto/sm4/wb/WBRandom.h | 25 + crypto/sm4/wb/WSISE-wbsm4.c | 536 ++ crypto/sm4/wb/Xiao-Lai-wbsm4.c | 276 + crypto/sm4/wb/wbsm4.c | 92 + include/crypto/wbsm4.h | 74 + include/crypto/wbstructure.h | 136 + include/openssl/evp.h | 29 + include/openssl/obj_mac.h | 84 + providers/defltprov.c | 28 + providers/implementations/ciphers/build.info | 6 + .../implementations/ciphers/cipher_wbsm4.c | 141 + .../implementations/ciphers/cipher_wbsm4.h | 63 + .../ciphers/cipher_wbsm4_ccm.c | 90 + .../ciphers/cipher_wbsm4_ccm.h | 52 + .../ciphers/cipher_wbsm4_ccm_hw.c | 103 + .../ciphers/cipher_wbsm4_gcm.c | 93 + .../ciphers/cipher_wbsm4_gcm.h | 52 + .../ciphers/cipher_wbsm4_gcm_hw.c | 205 + .../implementations/ciphers/cipher_wbsm4_hw.c | 134 + .../include/prov/implementations.h | 28 + .../implementations/include/prov/names.h | 22 + providers/implementations/kdfs/build.info | 3 + providers/implementations/kdfs/wbsm4kdf.c | 319 + test/build.info | 7 + test/recipes/03-test_internal_wbsm4.t | 18 + test/wbsm4_internal_test.c | 416 ++ 42 files changed, 11657 insertions(+), 3 deletions(-) create mode 100644 crypto/evp/e_wbsm4_baiwu.c create mode 100644 crypto/evp/e_wbsm4_wsise.c create mode 100644 crypto/evp/e_wbsm4_xiaolai.c create mode 100644 crypto/sm4/wb/Bai-Wu-wbsm4.c create mode 100644 
crypto/sm4/wb/WBMatrix.c create mode 100644 crypto/sm4/wb/WBMatrix.h create mode 100644 crypto/sm4/wb/WBRandom.h create mode 100644 crypto/sm4/wb/WSISE-wbsm4.c create mode 100644 crypto/sm4/wb/Xiao-Lai-wbsm4.c create mode 100644 crypto/sm4/wb/wbsm4.c create mode 100644 include/crypto/wbsm4.h create mode 100644 include/crypto/wbstructure.h create mode 100644 providers/implementations/ciphers/cipher_wbsm4.c create mode 100644 providers/implementations/ciphers/cipher_wbsm4.h create mode 100644 providers/implementations/ciphers/cipher_wbsm4_ccm.c create mode 100644 providers/implementations/ciphers/cipher_wbsm4_ccm.h create mode 100644 providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c create mode 100644 providers/implementations/ciphers/cipher_wbsm4_gcm.c create mode 100644 providers/implementations/ciphers/cipher_wbsm4_gcm.h create mode 100644 providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c create mode 100644 providers/implementations/ciphers/cipher_wbsm4_hw.c create mode 100644 providers/implementations/kdfs/wbsm4kdf.c create mode 100644 test/recipes/03-test_internal_wbsm4.t create mode 100644 test/wbsm4_internal_test.c diff --git a/Configure b/Configure index e3bd53819..9ce21a424 100755 --- a/Configure +++ b/Configure @@ -488,6 +488,7 @@ my @disablables = ( "sm2_threshold", "sm3", "sm4", + "wbsm4", "zuc", "sock", "srp", @@ -614,6 +615,7 @@ our %disabled = ( # "what" => "comment" "atf_slibce" => "default", "sdf-lib" => "default", "sdf-lib-dynamic" => "default", + "wbsm4" => "default", ); # Note: => pair form used for aesthetics, not to truly make a hash table @@ -688,6 +690,7 @@ my @disable_cascades = ( "tests" => [ "external-tests" ], "comp" => [ "zlib" ], "sm3" => [ "sm2" ], + "sm4" => [ "wbsm4" ], sub { !$disabled{"unit-test"} } => [ "heartbeats" ], sub { !$disabled{"msan"} } => [ "asm" ], diff --git a/apps/enc.c b/apps/enc.c index a1ec6b723..b04c22359 100644 --- a/apps/enc.c +++ b/apps/enc.c @@ -44,6 +44,9 @@ typedef enum OPTION_choice { OPT_E, 
OPT_IN, OPT_OUT, OPT_PASS, OPT_ENGINE, OPT_D, OPT_P, OPT_V, OPT_NOPAD, OPT_SALT, OPT_NOSALT, OPT_DEBUG, OPT_UPPER_P, OPT_UPPER_A, OPT_A, OPT_Z, OPT_BUFSIZE, OPT_K, OPT_KFILE, OPT_UPPER_K, OPT_NONE, +#ifndef OPENSSL_NO_WBSM4 + OPT_KBINARY, +#endif OPT_UPPER_S, OPT_IV, OPT_MD, OPT_ITER, OPT_PBKDF2, OPT_CIPHER, OPT_R_ENUM, OPT_PROV_ENUM } OPTION_CHOICE; @@ -67,6 +70,9 @@ const OPTIONS enc_options[] = { {"in", OPT_IN, '<', "Input file"}, {"k", OPT_K, 's', "Passphrase"}, {"kfile", OPT_KFILE, '<', "Read passphrase from file"}, +#ifndef OPENSSL_NO_WBSM4 + {"kbinary", OPT_KBINARY, '<', "Read raw key from file"}, +#endif OPT_SECTION("Output"), {"out", OPT_OUT, '>', "Output file"}, @@ -132,6 +138,10 @@ int enc_main(int argc, char **argv) int do_zlib = 0; BIO *bzl = NULL; #endif +#ifndef OPENSSL_NO_WBSM4 + unsigned char *rawkey = NULL; + int rawkeylen = 0; +#endif /* first check the command name */ if (strcmp(argv[0], "base64") == 0) @@ -250,6 +260,16 @@ int enc_main(int argc, char **argv) } str = buf; break; +#ifndef OPENSSL_NO_WBSM4 + case OPT_KBINARY: + in = bio_open_default(opt_arg(), 'r', FORMAT_BINARY); + if (in == NULL) + goto opthelp; + rawkeylen = bio_to_mem(&rawkey, 1024 * 1024 * 40, in); + if (rawkeylen <= 0) + goto opthelp; + break; +#endif case OPT_UPPER_K: hkey = opt_arg(); break; @@ -345,6 +365,17 @@ int enc_main(int argc, char **argv) str = pass; } +#ifndef OPENSSL_NO_WBSM4 + if (rawkey != NULL) { + if (cipher != NULL && rawkeylen != EVP_CIPHER_key_length(cipher)) + { + BIO_printf(bio_err, "invalid raw key length: %d, need: %d\n", + rawkeylen, EVP_CIPHER_key_length(cipher)); + goto end; + } + } + else +#endif if ((str == NULL) && (cipher != NULL) && (hkey == NULL)) { if (1) { #ifndef OPENSSL_NO_UI_CONSOLE @@ -565,6 +596,18 @@ int enc_main(int argc, char **argv) if (nopad) EVP_CIPHER_CTX_set_padding(ctx, 0); +#ifndef OPENSSL_NO_WBSM4 + if (rawkey) { + if (!EVP_CipherInit_ex(ctx, NULL, NULL, rawkey, iv, enc)) + { + BIO_printf(bio_err, "Error setting cipher 
%s\n", + EVP_CIPHER_get0_name(cipher)); + ERR_print_errors(bio_err); + goto end; + } + } + else +#endif if (!EVP_CipherInit_ex(ctx, NULL, NULL, key, iv, enc)) { BIO_printf(bio_err, "Error setting cipher %s\n", EVP_CIPHER_get0_name(cipher)); @@ -584,6 +627,18 @@ int enc_main(int argc, char **argv) printf("%02X", salt[i]); printf("\n"); } +#ifndef OPENSSL_NO_WBSM4 + if (rawkey) + { + printf("key="); + for (i = 0; i < EVP_CIPHER_get_key_length(cipher) && i < 32; i++) + printf("%02X", rawkey[i]); + if (EVP_CIPHER_get_key_length(cipher) > 32) + printf("(...%d)", EVP_CIPHER_get_key_length(cipher)); + printf("\n"); + } + else +#endif if (EVP_CIPHER_get_key_length(cipher) > 0) { printf("key="); for (i = 0; i < EVP_CIPHER_get_key_length(cipher); i++) @@ -638,6 +693,9 @@ int enc_main(int argc, char **argv) EVP_CIPHER_free(cipher); #ifdef ZLIB BIO_free(bzl); +#endif +#ifndef OPENSSL_NO_WBSM4 + OPENSSL_free(rawkey); #endif release_engine(e); OPENSSL_free(pass); diff --git a/apps/kdf.c b/apps/kdf.c index 89ee1f69c..52c310964 100644 --- a/apps/kdf.c +++ b/apps/kdf.c @@ -170,6 +170,11 @@ int kdf_main(int argc, char **argv) if (out == NULL) goto err; +#ifndef OPENSSL_NO_WBSM4 + if (OPENSSL_strcasecmp(argv[0], "wbsm4kdf") == 0) + dkm_len = EVP_KDF_CTX_get_kdf_size(ctx); +#endif + if (dkm_len <= 0) { BIO_printf(bio_err, "Invalid derived key length.\n"); goto err; diff --git a/apps/speed.c b/apps/speed.c index d4e35f4b8..af81d9555 100644 --- a/apps/speed.c +++ b/apps/speed.c @@ -323,6 +323,9 @@ enum { D_CBC_RC5, D_CBC_128_AES, D_CBC_192_AES, D_CBC_256_AES, D_EVP, D_GHASH, D_RAND, D_EVP_CMAC, D_SM3, D_CBC_SM4, D_ECB_SM4, + D_CBC_WBSM4_XIAOLAI, D_ECB_WBSM4_XIAOLAI, + D_CBC_WBSM4_BAIWU, D_ECB_WBSM4_BAIWU, + D_CBC_WBSM4_WSISE, D_ECB_WBSM4_WSISE, D_EEA3_128_ZUC, D_EIA3_128_ZUC, D_SM2_ENCRYPT, D_SM2_DECRYPT, D_SM2_THRESHOLD_DECRYPT, ALGOR_NUM }; @@ -334,6 +337,9 @@ static const char *names[ALGOR_NUM] = { "rc5-cbc", "aes-128-cbc", "aes-192-cbc", "aes-256-cbc", "evp", "ghash", "rand", 
"cmac", "sm3", "sm4-cbc", "sm4-ecb", + "wbsm4-xiaolai-cbc", "wbsm4-xiaolai-ecb", + "wbsm4-baiwu-cbc", "wbsm4-baiwu-ecb", + "wbsm4-wsise-cbc", "wbsm4-wsise-ecb", "zuc-128-eea3", "zuc-128-eia3", "sm2-encrypt", "sm2-decrypt", "sm2-thr-dec", }; @@ -362,6 +368,19 @@ static const OPT_PAIR doit_choices[] = { {"sm4", D_CBC_SM4}, {"sm4-ecb", D_ECB_SM4}, #endif +#ifndef OPENSSL_NO_WBSM4 + {"wbsm4-xiaolai-cbc", D_CBC_WBSM4_XIAOLAI}, + {"wbsm4-xiaolai", D_CBC_WBSM4_XIAOLAI}, + {"wbsm4-xiaolai-ecb", D_ECB_WBSM4_XIAOLAI}, + + {"wbsm4-baiwu-cbc", D_CBC_WBSM4_BAIWU}, + {"wbsm4-baiwu", D_CBC_WBSM4_BAIWU}, + {"wbsm4-baiwu-ecb", D_ECB_WBSM4_BAIWU}, + + {"wbsm4-wsise-cbc", D_CBC_WBSM4_WSISE}, + {"wbsm4-wsise", D_CBC_WBSM4_WSISE}, + {"wbsm4-wsise-ecb", D_ECB_WBSM4_WSISE}, +#endif #ifndef OPENSSL_NO_ZUC {"zuc-128-eea3", D_EEA3_128_ZUC}, {"zuc-128-eia3", D_EIA3_128_ZUC}, @@ -3109,6 +3128,125 @@ int speed_main(int argc, char **argv) } } #endif +#ifndef OPENSSL_NO_WBSM4 + for (k = 0; k < 2; k++) + { + algindex = D_CBC_WBSM4_XIAOLAI + k; + if (doit[algindex]) + { + int st = 1; + + const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-XIAOLAI"); + if (cipher == NULL) + continue; + + keylen = EVP_CIPHER_key_length(cipher); + unsigned char *local_key = (unsigned char *)OPENSSL_malloc(keylen); + if (local_key == NULL) + continue; + RAND_bytes(local_key, keylen); + + for (i = 0; st && i < loopargs_len; i++) + { + loopargs[i].ctx = init_evp_cipher_ctx(names[algindex], + local_key, keylen); + st = loopargs[i].ctx != NULL; + } + OPENSSL_free(local_key); + + for (testnum = 0; st && testnum < size_num; testnum++) + { + print_message(names[algindex], c[algindex][testnum], + lengths[testnum], seconds.sym); + Time_F(START); + count = + run_benchmark(async_jobs, EVP_Cipher_loop, loopargs); + d = Time_F(STOP); + print_result(algindex, testnum, count, d); + } + for (i = 0; i < loopargs_len; i++) + EVP_CIPHER_CTX_free(loopargs[i].ctx); + } + } + for (k = 0; k < 2; k++) + { + algindex = D_CBC_WBSM4_BAIWU + 
k; + if (doit[algindex]) + { + int st = 1; + + const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-BAIWU"); + if (cipher == NULL) + continue; + + keylen = EVP_CIPHER_key_length(cipher); + unsigned char *local_key = (unsigned char *)OPENSSL_malloc(keylen); + if (local_key == NULL) + continue; + RAND_bytes(local_key, keylen); + + for (i = 0; st && i < loopargs_len; i++) + { + loopargs[i].ctx = init_evp_cipher_ctx(names[algindex], + local_key, keylen); + st = loopargs[i].ctx != NULL; + } + OPENSSL_free(local_key); + + for (testnum = 0; st && testnum < size_num; testnum++) + { + print_message(names[algindex], c[algindex][testnum], + lengths[testnum], seconds.sym); + Time_F(START); + count = + run_benchmark(async_jobs, EVP_Cipher_loop, loopargs); + d = Time_F(STOP); + print_result(algindex, testnum, count, d); + } + for (i = 0; i < loopargs_len; i++) + EVP_CIPHER_CTX_free(loopargs[i].ctx); + } + } + for (k = 0; k < 2; k++) + { + algindex = D_CBC_WBSM4_WSISE + k; + if (doit[algindex]) + { + int st = 1; + + const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-WSISE"); + if (cipher == NULL) + continue; + + keylen = EVP_CIPHER_key_length(cipher); + unsigned char *local_key = (unsigned char *)OPENSSL_malloc(keylen); + if (local_key == NULL) + continue; + RAND_bytes(local_key, keylen); + + for (i = 0; st && i < loopargs_len; i++) + { + loopargs[i].ctx = init_evp_cipher_ctx(names[algindex], + local_key, keylen); + st = loopargs[i].ctx != NULL; + } + OPENSSL_free(local_key); + + for (testnum = 0; st && testnum < size_num; testnum++) + { + print_message(names[algindex], c[algindex][testnum], + lengths[testnum], seconds.sym); + Time_F(START); + count = + run_benchmark(async_jobs, EVP_Cipher_loop, loopargs); + d = Time_F(STOP); + print_result(algindex, testnum, count, d); + } + for (i = 0; i < loopargs_len; i++) + EVP_CIPHER_CTX_free(loopargs[i].ctx); + } + } +#endif #ifndef OPENSSL_NO_ZUC if (doit[D_EEA3_128_ZUC]) { diff --git a/crypto/evp/build.info b/crypto/evp/build.info 
index 5faf9a4ef..6eb069275 100644 --- a/crypto/evp/build.info +++ b/crypto/evp/build.info @@ -8,6 +8,7 @@ SOURCE[../../libcrypto]=$COMMON\ encode.c evp_key.c evp_cnf.c \ e_des.c e_des3.c \ e_rc4.c e_aes.c names.c e_sm4.c \ + e_wbsm4_xiaolai.c e_wbsm4_baiwu.c e_wbsm4_wsise.c \ e_xcbc_d.c e_rc5.c m_null.c \ p_seal.c p_sign.c p_verify.c p_legacy.c \ bio_md.c bio_b64.c bio_enc.c evp_err.c e_null.c \ diff --git a/crypto/evp/c_allc.c b/crypto/evp/c_allc.c index 4bcd4db69..92197edb6 100644 --- a/crypto/evp/c_allc.c +++ b/crypto/evp/c_allc.c @@ -73,6 +73,38 @@ void openssl_add_all_ciphers_int(void) EVP_add_cipher(EVP_sm4_ccm()); #endif +#ifndef OPENSSL_NO_WBSM4 + EVP_add_cipher(EVP_wbsm4_xiaolai_ecb()); + EVP_add_cipher(EVP_wbsm4_xiaolai_cbc()); + EVP_add_cipher(EVP_wbsm4_xiaolai_cfb()); + EVP_add_cipher(EVP_wbsm4_xiaolai_ofb()); + EVP_add_cipher(EVP_wbsm4_xiaolai_ctr()); + EVP_add_cipher_alias(SN_wbsm4_xiaolai_cbc, "WBSM4-XIAOLAI"); + EVP_add_cipher_alias(SN_wbsm4_xiaolai_cbc, "wbsm4-xiaolai"); + EVP_add_cipher(EVP_wbsm4_xiaolai_gcm()); + EVP_add_cipher(EVP_wbsm4_xiaolai_ccm()); + + EVP_add_cipher(EVP_wbsm4_baiwu_ecb()); + EVP_add_cipher(EVP_wbsm4_baiwu_cbc()); + EVP_add_cipher(EVP_wbsm4_baiwu_cfb()); + EVP_add_cipher(EVP_wbsm4_baiwu_ofb()); + EVP_add_cipher(EVP_wbsm4_baiwu_ctr()); + EVP_add_cipher_alias(SN_wbsm4_baiwu_cbc, "WBSM4-BAIWU"); + EVP_add_cipher_alias(SN_wbsm4_baiwu_cbc, "wbsm4-baiwu"); + EVP_add_cipher(EVP_wbsm4_baiwu_gcm()); + EVP_add_cipher(EVP_wbsm4_baiwu_ccm()); + + EVP_add_cipher(EVP_wbsm4_wsise_ecb()); + EVP_add_cipher(EVP_wbsm4_wsise_cbc()); + EVP_add_cipher(EVP_wbsm4_wsise_cfb()); + EVP_add_cipher(EVP_wbsm4_wsise_ofb()); + EVP_add_cipher(EVP_wbsm4_wsise_ctr()); + EVP_add_cipher_alias(SN_wbsm4_wsise_cbc, "WBSM4-WSISE"); + EVP_add_cipher_alias(SN_wbsm4_wsise_cbc, "wbsm4-wsise"); + EVP_add_cipher(EVP_wbsm4_wsise_gcm()); + EVP_add_cipher(EVP_wbsm4_wsise_ccm()); +#endif + #ifndef OPENSSL_NO_RC5 EVP_add_cipher(EVP_rc5_32_12_16_ecb()); 
EVP_add_cipher(EVP_rc5_32_12_16_cfb()); diff --git a/crypto/evp/e_wbsm4_baiwu.c b/crypto/evp/e_wbsm4_baiwu.c new file mode 100644 index 000000000..55766fde6 --- /dev/null +++ b/crypto/evp/e_wbsm4_baiwu.c @@ -0,0 +1,749 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2017 Ribose Inc. All Rights Reserved. + * Ported from Ribose contributions from Botan. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include "internal/deprecated.h" + +#include "internal/cryptlib.h" +#ifndef OPENSSL_NO_WBSM4 +# include +# include "crypto/wbsm4.h" +# include "crypto/evp.h" +# include "crypto/modes.h" +# include "evp_local.h" + +typedef struct { + union { + OSSL_UNION_ALIGN; + wbsm4_baiwu_key ks; + } ks; + block128_f block; +} EVP_WBSM4_BAIWU_KEY; + +# define BLOCK_CIPHER_generic(nid,blocksize,ivlen,nmode,mode,MODE,flags) \ +static const EVP_CIPHER wbsm4_baiwu_##mode = { \ + nid##_##nmode,blocksize,sizeof(wbsm4_baiwu_key),ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_baiwu_init_key, \ + wbsm4_baiwu_##mode##_cipher, \ + NULL, \ + sizeof(EVP_WBSM4_BAIWU_KEY), \ + NULL,NULL,NULL,NULL }; \ +const EVP_CIPHER *EVP_wbsm4_baiwu_##mode(void) \ +{ return &wbsm4_baiwu_##mode; } + +#define DEFINE_BLOCK_CIPHERS(nid,flags) \ + BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ctr,ctr,CTR,flags) + +static int wbsm4_baiwu_init_key(EVP_CIPHER_CTX 
*ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_WBSM4_BAIWU_KEY *dat = EVP_C_DATA(EVP_WBSM4_BAIWU_KEY,ctx); + + if (!enc) { + ERR_raise(ERR_LIB_EVP, EVP_R_BAD_DECRYPT); + return 0; + } + + dat->block = (block128_f)wbsm4_baiwu_encrypt; + wbsm4_baiwu_set_key(key, &dat->ks.ks); + + return 1; +} + +static int wbsm4_baiwu_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_BAIWU_KEY *dat = EVP_C_DATA(EVP_WBSM4_BAIWU_KEY,ctx); + + CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, + dat->block); + + return 1; +} + +static int wbsm4_baiwu_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_BAIWU_KEY *dat = EVP_C_DATA(EVP_WBSM4_BAIWU_KEY,ctx); + int num = EVP_CIPHER_CTX_get_num(ctx); + + CRYPTO_cfb128_encrypt(in, out, len, &dat->ks, + ctx->iv, &num, + EVP_CIPHER_CTX_is_encrypting(ctx), dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +static int wbsm4_baiwu_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + size_t bl = EVP_CIPHER_CTX_get_block_size(ctx); + size_t i; + EVP_WBSM4_BAIWU_KEY *dat = EVP_C_DATA(EVP_WBSM4_BAIWU_KEY,ctx); + + if (len < bl) + return 1; + + for (i = 0, len -= bl; i <= len; i += bl) + (*dat->block) (in + i, out + i, &dat->ks); + + return 1; +} + +static int wbsm4_baiwu_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_BAIWU_KEY *dat = EVP_C_DATA(EVP_WBSM4_BAIWU_KEY,ctx); + int num = EVP_CIPHER_CTX_get_num(ctx); + + CRYPTO_ofb128_encrypt(in, out, len, &dat->ks, + ctx->iv, &num, dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +static int wbsm4_baiwu_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + int n = EVP_CIPHER_CTX_get_num(ctx); + unsigned int num; + EVP_WBSM4_BAIWU_KEY *dat = EVP_C_DATA(EVP_WBSM4_BAIWU_KEY,ctx); + + if (n < 0) + 
return 0; + num = (unsigned int)n; + + CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, + ctx->iv, + EVP_CIPHER_CTX_buf_noconst(ctx), &num, + dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +DEFINE_BLOCK_CIPHERS(NID_wbsm4_baiwu, 0) + +# define BLOCK_CIPHER_custom(nid,blocksize,ivlen,mode,MODE,flags) \ +static const EVP_CIPHER wbsm4_baiwu_##mode = { \ + nid##_##mode,blocksize, sizeof(wbsm4_baiwu_key), ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_baiwu_##mode##_init, \ + wbsm4_baiwu_##mode##_cipher, \ + wbsm4_baiwu_##mode##_cleanup, \ + sizeof(EVP_SM4_##MODE##_CTX), \ + NULL,NULL,wbsm4_baiwu_##mode##_ctrl,NULL }; \ +const EVP_CIPHER *EVP_wbsm4_baiwu_##mode(void) \ +{ return &wbsm4_baiwu_##mode; } + +typedef struct { + wbsm4_baiwu_key ks; /* WBSM4 key schedule to use */ + int key_set; /* Set if key initialized */ + int iv_set; /* Set if an iv is set */ + GCM128_CONTEXT gcm; + unsigned char *iv; /* Temporary IV store */ + int ivlen; /* IV length */ + int taglen; + int iv_gen; /* It is OK to generate IVs */ + int tls_aad_len; /* TLS AAD length */ + ctr128_f ctr; +} EVP_SM4_GCM_CTX; + +typedef struct { + wbsm4_baiwu_key ks; /* WBSM4 key schedule to use */ + int key_set; /* Set if key initialized */ + int iv_set; /* Set if an iv is set */ + int tag_set; /* Set if tag is valid */ + int len_set; /* Set if message length set */ + int L, M; /* L and M parameters from RFC3610 */ + int tls_aad_len; /* TLS AAD length */ + CCM128_CONTEXT ccm; + ccm128_f str; +} EVP_SM4_CCM_CTX; + +static int wbsm4_baiwu_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_baiwu_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc); +static int wbsm4_baiwu_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len); +static int wbsm4_baiwu_gcm_cleanup(EVP_CIPHER_CTX *c); + +static int wbsm4_baiwu_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static 
int wbsm4_baiwu_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc); +static int wbsm4_baiwu_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len); +static int wbsm4_baiwu_ccm_cleanup(EVP_CIPHER_CTX *c); + +/* increment counter (64-bit int) by 1 */ +static void ctr64_inc(unsigned char *counter) +{ + int n = 8; + unsigned char c; + + do { + --n; + c = counter[n]; + ++c; + counter[n] = c; + if (c) + return; + } while (n); +} + +static int wbsm4_baiwu_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,c); + + switch (type) { + case EVP_CTRL_INIT: + gctx->key_set = 0; + gctx->iv_set = 0; + gctx->ivlen = EVP_CIPHER_iv_length(c->cipher); + gctx->iv = c->iv; + gctx->taglen = -1; + gctx->iv_gen = 0; + gctx->tls_aad_len = -1; + return 1; + + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = gctx->ivlen; + return 1; + + case EVP_CTRL_AEAD_SET_IVLEN: + if (arg <= 0) + return 0; + /* Allocate memory for IV if needed */ + if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) { + if (gctx->iv != c->iv) + OPENSSL_free(gctx->iv); + if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) + return 0; + } + gctx->ivlen = arg; + return 1; + + case EVP_CTRL_AEAD_SET_TAG: + if (arg <= 0 || arg > 16 || c->encrypt) + return 0; + memcpy(c->buf, ptr, arg); + gctx->taglen = arg; + return 1; + + case EVP_CTRL_AEAD_GET_TAG: + if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) + return 0; + memcpy(ptr, c->buf, arg); + return 1; + + case EVP_CTRL_GCM_SET_IV_FIXED: + /* Special case: -1 length restores whole IV */ + if (arg == -1) { + memcpy(gctx->iv, ptr, gctx->ivlen); + gctx->iv_gen = 1; + return 1; + } + /* + * Fixed field must be at least 4 bytes and invocation field at least + * 8. 
+ */ + if ((arg < 4) || (gctx->ivlen - arg) < 8) + return 0; + if (arg) + memcpy(gctx->iv, ptr, arg); + if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0) + return 0; + gctx->iv_gen = 1; + return 1; + + case EVP_CTRL_GCM_IV_GEN: + if (gctx->iv_gen == 0 || gctx->key_set == 0) + return 0; + CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen); + if (arg <= 0 || arg > gctx->ivlen) + arg = gctx->ivlen; + memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); + /* + * Invocation field will be at least 8 bytes in size and so no need + * to check wrap around or increment more than last 8 bytes. + */ + ctr64_inc(gctx->iv + gctx->ivlen - 8); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_GCM_SET_IV_INV: + if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) + return 0; + memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg); + CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_AEAD_TLS1_AAD: + /* Save the AAD for later use */ + if (arg != EVP_AEAD_TLS1_AAD_LEN) + return 0; + memcpy(c->buf, ptr, arg); + gctx->tls_aad_len = arg; + { + unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1]; + /* Correct length for explicit IV */ + if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN) + return 0; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ + if (!c->encrypt) { + if (len < EVP_GCM_TLS_TAG_LEN) + return 0; + len -= EVP_GCM_TLS_TAG_LEN; + } + c->buf[arg - 2] = len >> 8; + c->buf[arg - 1] = len & 0xff; + } + /* Extra padding: tag appended to record */ + return EVP_GCM_TLS_TAG_LEN; + + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_SM4_GCM_CTX *gctx_out = EVP_C_DATA(EVP_SM4_GCM_CTX,out); + + if (gctx->gcm.key) { + if (gctx->gcm.key != &gctx->ks) + return 0; + gctx_out->gcm.key = &gctx_out->ks; + } + if (gctx->iv == c->iv) + gctx_out->iv = out->iv; + else { + if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) { + return 0; + } + memcpy(gctx_out->iv, gctx->iv, 
gctx->ivlen); + } + return 1; + } + case EVP_CTRL_AEAD_SET_MAC_KEY: + /* no-op */ + return 1; + default: + return -1; + } + return 1; +} + +static int wbsm4_baiwu_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + + if (iv == NULL && key == NULL) + return 1; + if (key) { + do { + wbsm4_baiwu_set_key(key, &gctx->ks); + CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, + (block128_f)wbsm4_baiwu_encrypt); + gctx->ctr = NULL; + } while (0); + + /* + * If we have an iv can set it directly, otherwise use saved IV. + */ + if (iv == NULL && gctx->iv_set) + iv = gctx->iv; + if (iv) { + CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen); + gctx->iv_set = 1; + } + gctx->key_set = 1; + } else { + /* If key set use IV, otherwise copy */ + if (gctx->key_set) + CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen); + else + memcpy(gctx->iv, iv, gctx->ivlen); + gctx->iv_set = 1; + gctx->iv_gen = 0; + } + return 1; +} + +/* + * Handle TLS GCM packet format. This consists of the last portion of the IV + * followed by the payload and finally the tag. On encrypt generate IV, + * encrypt payload and write the tag. On verify retrieve IV, decrypt payload + * and verify tag. + */ + +static int wbsm4_baiwu_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + int rv = -1; + /* Encrypt/decrypt must be performed in place */ + if (out != in + || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN)) + return -1; + /* + * Set IV from start of buffer or generate IV and write to start of + * buffer. + */ + if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? 
EVP_CTRL_GCM_IV_GEN + : EVP_CTRL_GCM_SET_IV_INV, + EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0) + goto err; + /* Use saved AAD */ + if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len)) + goto err; + /* Fix buffer and length to point to payload */ + in += EVP_GCM_TLS_EXPLICIT_IV_LEN; + out += EVP_GCM_TLS_EXPLICIT_IV_LEN; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN; + if (ctx->encrypt) { + /* Encrypt payload */ + if (gctx->ctr) { + size_t bulk = 0; + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, + in + bulk, + out + bulk, + len - bulk, gctx->ctr)) + goto err; + } else { + size_t bulk = 0; + if (CRYPTO_gcm128_encrypt(&gctx->gcm, + in + bulk, out + bulk, len - bulk)) + goto err; + } + out += len; + /* Finally write tag */ + CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN); + rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN; + } else { + /* Decrypt */ + if (gctx->ctr) { + size_t bulk = 0; + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, + in + bulk, + out + bulk, + len - bulk, gctx->ctr)) + goto err; + } else { + size_t bulk = 0; + if (CRYPTO_gcm128_decrypt(&gctx->gcm, + in + bulk, out + bulk, len - bulk)) + goto err; + } + /* Retrieve tag */ + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN); + /* If tag mismatch wipe buffer */ + if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) { + OPENSSL_cleanse(out, len); + goto err; + } + rv = len; + } + + err: + gctx->iv_set = 0; + gctx->tls_aad_len = -1; + return rv; +} + +static int wbsm4_baiwu_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + + /* If not set up, return error */ + if (!gctx->key_set) + return -1; + + if (gctx->tls_aad_len >= 0) + return wbsm4_baiwu_gcm_tls_cipher(ctx, out, in, len); + + if (!gctx->iv_set) + return -1; + + if (in != NULL) { + if (out == NULL) { + if (CRYPTO_gcm128_aad(&gctx->gcm, in, len)) + return -1; + } else if (ctx->encrypt) { + if 
(gctx->ctr != NULL) { + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + return -1; + } else { + if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, len)) + return -1; + } + } else { + if (gctx->ctr != NULL) { + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + return -1; + } else { + if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, len)) + return -1; + } + } + return len; + } else { + if (!ctx->encrypt) { + if (gctx->taglen < 0) + return -1; + if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0) + return -1; + gctx->iv_set = 0; + return 0; + } + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16); + gctx->taglen = 16; + /* Don't reuse the IV */ + gctx->iv_set = 0; + return 0; + } +} + +static int wbsm4_baiwu_gcm_cleanup(EVP_CIPHER_CTX *c) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX, c); + const unsigned char *iv; + + if (gctx == NULL) + return 0; + + iv = EVP_CIPHER_CTX_iv(c); + if (iv != gctx->iv) + OPENSSL_free(gctx->iv); + + OPENSSL_cleanse(gctx, sizeof(*gctx)); + return 1; +} + +static int wbsm4_baiwu_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,c); + + switch (type) { + case EVP_CTRL_INIT: + cctx->key_set = 0; + cctx->iv_set = 0; + cctx->L = 8; + cctx->M = 12; + cctx->tag_set = 0; + cctx->len_set = 0; + cctx->tls_aad_len = -1; + return 1; + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = 15 - cctx->L; + return 1; + case EVP_CTRL_AEAD_TLS1_AAD: + /* Save the AAD for later use */ + if (arg != EVP_AEAD_TLS1_AAD_LEN) + return 0; + memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg); + cctx->tls_aad_len = arg; + { + uint16_t len = + EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8 + | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1]; + + /* Correct length for explicit IV */ + if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN) + return 0; + len -= EVP_CCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ + if (!EVP_CIPHER_CTX_encrypting(c)) { + if (len < cctx->M) + 
return 0; + len -= cctx->M; + } + EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8; + EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff; + } + /* Extra padding: tag appended to record */ + return cctx->M; + + case EVP_CTRL_CCM_SET_IV_FIXED: + /* Sanity check length */ + if (arg != EVP_CCM_TLS_FIXED_IV_LEN) + return 0; + /* Just copy to first part of IV */ + memcpy(EVP_CIPHER_CTX_iv_noconst(c), ptr, arg); + return 1; + + case EVP_CTRL_AEAD_SET_IVLEN: + arg = 15 - arg; + /* fall thru */ + case EVP_CTRL_CCM_SET_L: + if (arg < 2 || arg > 8) + return 0; + cctx->L = arg; + return 1; + + case EVP_CTRL_AEAD_SET_TAG: + if ((arg & 1) || arg < 4 || arg > 16) + return 0; + if (EVP_CIPHER_CTX_encrypting(c) && ptr) + return 0; + if (ptr) { + cctx->tag_set = 1; + memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg); + } + cctx->M = arg; + return 1; + + case EVP_CTRL_AEAD_GET_TAG: + if (!EVP_CIPHER_CTX_encrypting(c) || !cctx->tag_set) + return 0; + if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg)) + return 0; + cctx->tag_set = 0; + cctx->iv_set = 0; + cctx->len_set = 0; + return 1; + + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_SM4_CCM_CTX *cctx_out = EVP_C_DATA(EVP_SM4_CCM_CTX,out); + + if (cctx->ccm.key) { + if (cctx->ccm.key != &cctx->ks) + return 0; + cctx_out->ccm.key = &cctx_out->ks; + } + return 1; + } + + default: + return -1; + + } +} + +static int wbsm4_baiwu_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,ctx); + + if (iv == NULL && key == NULL) + return 1; + if (key != NULL) + do { + wbsm4_baiwu_set_key(key, &cctx->ks); + CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L, + &cctx->ks, (block128_f)wbsm4_baiwu_encrypt); + cctx->str = NULL; + cctx->key_set = 1; + } while (0); + if (iv != NULL) { + memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L); + cctx->iv_set = 1; + } + return 1; +} + +static int wbsm4_baiwu_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned 
char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,ctx); + CCM128_CONTEXT *ccm = &cctx->ccm; + + /* If not set up, return error */ + if (!cctx->key_set) + return -1; + + /* EVP_*Final() doesn't return any data */ + if (in == NULL && out != NULL) + return 0; + + if (!cctx->iv_set) + return -1; + + if (out == NULL) { + if (in == NULL) { + if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), + 15 - cctx->L, len)) + return -1; + cctx->len_set = 1; + return len; + } + /* If have AAD need message length */ + if (!cctx->len_set && len) + return -1; + CRYPTO_ccm128_aad(ccm, in, len); + return len; + } + + /* The tag must be set before actually decrypting data */ + if (!EVP_CIPHER_CTX_encrypting(ctx) && !cctx->tag_set) + return -1; + + /* If not set length yet do it */ + if (!cctx->len_set) { + if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), + 15 - cctx->L, len)) + return -1; + cctx->len_set = 1; + } + if (EVP_CIPHER_CTX_encrypting(ctx)) { + if (cctx->str != NULL ? + CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len, + cctx->str) : + CRYPTO_ccm128_encrypt(ccm, in, out, len)) + return -1; + cctx->tag_set = 1; + return len; + } else { + int rv = -1; + + if (cctx->str != NULL ? 
!CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len, + cctx->str) : + !CRYPTO_ccm128_decrypt(ccm, in, out, len)) { + unsigned char tag[16]; + if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) { + if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx), + cctx->M)) + rv = len; + } + } + if (rv == -1) + OPENSSL_cleanse(out, len); + cctx->iv_set = 0; + cctx->tag_set = 0; + cctx->len_set = 0; + return rv; + } + +} + +static int wbsm4_baiwu_ccm_cleanup(EVP_CIPHER_CTX *c) +{ + return 1; +} + +#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ + | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH) + +BLOCK_CIPHER_custom(NID_wbsm4_baiwu, 1, 12, gcm, GCM, + EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS) +BLOCK_CIPHER_custom(NID_wbsm4_baiwu, 1, 12, ccm, CCM, + EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS) +#endif diff --git a/crypto/evp/e_wbsm4_wsise.c b/crypto/evp/e_wbsm4_wsise.c new file mode 100644 index 000000000..7e853e4ab --- /dev/null +++ b/crypto/evp/e_wbsm4_wsise.c @@ -0,0 +1,749 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2017 Ribose Inc. All Rights Reserved. + * Ported from Ribose contributions from Botan. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include "internal/deprecated.h" + +#include "internal/cryptlib.h" +#ifndef OPENSSL_NO_WBSM4 +# include <openssl/rand.h> +# include "crypto/wbsm4.h" +# include "crypto/evp.h" +# include "crypto/modes.h" +# include "evp_local.h" + +typedef struct { + union { + OSSL_UNION_ALIGN; + wbsm4_wsise_key ks; + } ks; + block128_f block; +} EVP_WBSM4_WSISE_KEY; + +# define BLOCK_CIPHER_generic(nid,blocksize,ivlen,nmode,mode,MODE,flags) \ +static const EVP_CIPHER wbsm4_wsise_##mode = { \ + nid##_##nmode,blocksize,sizeof(wbsm4_wsise_key),ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_wsise_init_key, \ + wbsm4_wsise_##mode##_cipher, \ + NULL, \ + sizeof(EVP_WBSM4_WSISE_KEY), \ + NULL,NULL,NULL,NULL }; \ +const EVP_CIPHER *EVP_wbsm4_wsise_##mode(void) \ +{ return &wbsm4_wsise_##mode; } + +#define DEFINE_BLOCK_CIPHERS(nid,flags) \ + BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ctr,ctr,CTR,flags) + +static int wbsm4_wsise_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_WBSM4_WSISE_KEY *dat = EVP_C_DATA(EVP_WBSM4_WSISE_KEY,ctx); + + if (!enc) { + ERR_raise(ERR_LIB_EVP, EVP_R_BAD_DECRYPT); + return 0; + } + + dat->block = (block128_f)wbsm4_wsise_encrypt; + wbsm4_wsise_set_key(key, &dat->ks.ks); + + return 1; +} + +static int wbsm4_wsise_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_WSISE_KEY *dat = EVP_C_DATA(EVP_WBSM4_WSISE_KEY,ctx); + + CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, + dat->block); + + return 1; +} + 
+static int wbsm4_wsise_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_WSISE_KEY *dat = EVP_C_DATA(EVP_WBSM4_WSISE_KEY,ctx); + int num = EVP_CIPHER_CTX_get_num(ctx); + + CRYPTO_cfb128_encrypt(in, out, len, &dat->ks, + ctx->iv, &num, + EVP_CIPHER_CTX_is_encrypting(ctx), dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +static int wbsm4_wsise_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + size_t bl = EVP_CIPHER_CTX_get_block_size(ctx); + size_t i; + EVP_WBSM4_WSISE_KEY *dat = EVP_C_DATA(EVP_WBSM4_WSISE_KEY,ctx); + + if (len < bl) + return 1; + + for (i = 0, len -= bl; i <= len; i += bl) + (*dat->block) (in + i, out + i, &dat->ks); + + return 1; +} + +static int wbsm4_wsise_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_WSISE_KEY *dat = EVP_C_DATA(EVP_WBSM4_WSISE_KEY,ctx); + int num = EVP_CIPHER_CTX_get_num(ctx); + + CRYPTO_ofb128_encrypt(in, out, len, &dat->ks, + ctx->iv, &num, dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +static int wbsm4_wsise_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + int n = EVP_CIPHER_CTX_get_num(ctx); + unsigned int num; + EVP_WBSM4_WSISE_KEY *dat = EVP_C_DATA(EVP_WBSM4_WSISE_KEY,ctx); + + if (n < 0) + return 0; + num = (unsigned int)n; + + CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, + ctx->iv, + EVP_CIPHER_CTX_buf_noconst(ctx), &num, + dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +DEFINE_BLOCK_CIPHERS(NID_wbsm4_wsise, 0) + +# define BLOCK_CIPHER_custom(nid,blocksize,ivlen,mode,MODE,flags) \ +static const EVP_CIPHER wbsm4_wsise_##mode = { \ + nid##_##mode,blocksize, sizeof(wbsm4_wsise_key), ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_wsise_##mode##_init, \ + wbsm4_wsise_##mode##_cipher, \ + wbsm4_wsise_##mode##_cleanup, \ + 
sizeof(EVP_SM4_##MODE##_CTX), \ + NULL,NULL,wbsm4_wsise_##mode##_ctrl,NULL }; \ +const EVP_CIPHER *EVP_wbsm4_wsise_##mode(void) \ +{ return &wbsm4_wsise_##mode; } + +typedef struct { + wbsm4_wsise_key ks; /* WBSM4 key schedule to use */ + int key_set; /* Set if key initialized */ + int iv_set; /* Set if an iv is set */ + GCM128_CONTEXT gcm; + unsigned char *iv; /* Temporary IV store */ + int ivlen; /* IV length */ + int taglen; + int iv_gen; /* It is OK to generate IVs */ + int tls_aad_len; /* TLS AAD length */ + ctr128_f ctr; +} EVP_SM4_GCM_CTX; + +typedef struct { + wbsm4_wsise_key ks; /* WBSM4 key schedule to use */ + int key_set; /* Set if key initialized */ + int iv_set; /* Set if an iv is set */ + int tag_set; /* Set if tag is valid */ + int len_set; /* Set if message length set */ + int L, M; /* L and M parameters from RFC3610 */ + int tls_aad_len; /* TLS AAD length */ + CCM128_CONTEXT ccm; + ccm128_f str; +} EVP_SM4_CCM_CTX; + +static int wbsm4_wsise_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_wsise_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc); +static int wbsm4_wsise_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len); +static int wbsm4_wsise_gcm_cleanup(EVP_CIPHER_CTX *c); + +static int wbsm4_wsise_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_wsise_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc); +static int wbsm4_wsise_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len); +static int wbsm4_wsise_ccm_cleanup(EVP_CIPHER_CTX *c); + +/* increment counter (64-bit int) by 1 */ +static void ctr64_inc(unsigned char *counter) +{ + int n = 8; + unsigned char c; + + do { + --n; + c = counter[n]; + ++c; + counter[n] = c; + if (c) + return; + } while (n); +} + +static int wbsm4_wsise_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void 
*ptr) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,c); + + switch (type) { + case EVP_CTRL_INIT: + gctx->key_set = 0; + gctx->iv_set = 0; + gctx->ivlen = EVP_CIPHER_iv_length(c->cipher); + gctx->iv = c->iv; + gctx->taglen = -1; + gctx->iv_gen = 0; + gctx->tls_aad_len = -1; + return 1; + + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = gctx->ivlen; + return 1; + + case EVP_CTRL_AEAD_SET_IVLEN: + if (arg <= 0) + return 0; + /* Allocate memory for IV if needed */ + if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) { + if (gctx->iv != c->iv) + OPENSSL_free(gctx->iv); + if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) + return 0; + } + gctx->ivlen = arg; + return 1; + + case EVP_CTRL_AEAD_SET_TAG: + if (arg <= 0 || arg > 16 || c->encrypt) + return 0; + memcpy(c->buf, ptr, arg); + gctx->taglen = arg; + return 1; + + case EVP_CTRL_AEAD_GET_TAG: + if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) + return 0; + memcpy(ptr, c->buf, arg); + return 1; + + case EVP_CTRL_GCM_SET_IV_FIXED: + /* Special case: -1 length restores whole IV */ + if (arg == -1) { + memcpy(gctx->iv, ptr, gctx->ivlen); + gctx->iv_gen = 1; + return 1; + } + /* + * Fixed field must be at least 4 bytes and invocation field at least + * 8. + */ + if ((arg < 4) || (gctx->ivlen - arg) < 8) + return 0; + if (arg) + memcpy(gctx->iv, ptr, arg); + if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0) + return 0; + gctx->iv_gen = 1; + return 1; + + case EVP_CTRL_GCM_IV_GEN: + if (gctx->iv_gen == 0 || gctx->key_set == 0) + return 0; + CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen); + if (arg <= 0 || arg > gctx->ivlen) + arg = gctx->ivlen; + memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); + /* + * Invocation field will be at least 8 bytes in size and so no need + * to check wrap around or increment more than last 8 bytes. 
+ */ + ctr64_inc(gctx->iv + gctx->ivlen - 8); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_GCM_SET_IV_INV: + if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) + return 0; + memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg); + CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_AEAD_TLS1_AAD: + /* Save the AAD for later use */ + if (arg != EVP_AEAD_TLS1_AAD_LEN) + return 0; + memcpy(c->buf, ptr, arg); + gctx->tls_aad_len = arg; + { + unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1]; + /* Correct length for explicit IV */ + if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN) + return 0; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ + if (!c->encrypt) { + if (len < EVP_GCM_TLS_TAG_LEN) + return 0; + len -= EVP_GCM_TLS_TAG_LEN; + } + c->buf[arg - 2] = len >> 8; + c->buf[arg - 1] = len & 0xff; + } + /* Extra padding: tag appended to record */ + return EVP_GCM_TLS_TAG_LEN; + + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_SM4_GCM_CTX *gctx_out = EVP_C_DATA(EVP_SM4_GCM_CTX,out); + + if (gctx->gcm.key) { + if (gctx->gcm.key != &gctx->ks) + return 0; + gctx_out->gcm.key = &gctx_out->ks; + } + if (gctx->iv == c->iv) + gctx_out->iv = out->iv; + else { + if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) { + return 0; + } + memcpy(gctx_out->iv, gctx->iv, gctx->ivlen); + } + return 1; + } + case EVP_CTRL_AEAD_SET_MAC_KEY: + /* no-op */ + return 1; + default: + return -1; + } + return 1; +} + +static int wbsm4_wsise_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + + if (iv == NULL && key == NULL) + return 1; + if (key) { + do { + wbsm4_wsise_set_key(key, &gctx->ks); + CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, + (block128_f)wbsm4_wsise_encrypt); + gctx->ctr = NULL; + } while (0); + + /* + * If we have an iv can set it directly, otherwise use saved IV. 
+ */ + if (iv == NULL && gctx->iv_set) + iv = gctx->iv; + if (iv) { + CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen); + gctx->iv_set = 1; + } + gctx->key_set = 1; + } else { + /* If key set use IV, otherwise copy */ + if (gctx->key_set) + CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen); + else + memcpy(gctx->iv, iv, gctx->ivlen); + gctx->iv_set = 1; + gctx->iv_gen = 0; + } + return 1; +} + +/* + * Handle TLS GCM packet format. This consists of the last portion of the IV + * followed by the payload and finally the tag. On encrypt generate IV, + * encrypt payload and write the tag. On verify retrieve IV, decrypt payload + * and verify tag. + */ + +static int wbsm4_wsise_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + int rv = -1; + /* Encrypt/decrypt must be performed in place */ + if (out != in + || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN)) + return -1; + /* + * Set IV from start of buffer or generate IV and write to start of + * buffer. + */ + if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? 
EVP_CTRL_GCM_IV_GEN + : EVP_CTRL_GCM_SET_IV_INV, + EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0) + goto err; + /* Use saved AAD */ + if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len)) + goto err; + /* Fix buffer and length to point to payload */ + in += EVP_GCM_TLS_EXPLICIT_IV_LEN; + out += EVP_GCM_TLS_EXPLICIT_IV_LEN; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN; + if (ctx->encrypt) { + /* Encrypt payload */ + if (gctx->ctr) { + size_t bulk = 0; + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, + in + bulk, + out + bulk, + len - bulk, gctx->ctr)) + goto err; + } else { + size_t bulk = 0; + if (CRYPTO_gcm128_encrypt(&gctx->gcm, + in + bulk, out + bulk, len - bulk)) + goto err; + } + out += len; + /* Finally write tag */ + CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN); + rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN; + } else { + /* Decrypt */ + if (gctx->ctr) { + size_t bulk = 0; + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, + in + bulk, + out + bulk, + len - bulk, gctx->ctr)) + goto err; + } else { + size_t bulk = 0; + if (CRYPTO_gcm128_decrypt(&gctx->gcm, + in + bulk, out + bulk, len - bulk)) + goto err; + } + /* Retrieve tag */ + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN); + /* If tag mismatch wipe buffer */ + if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) { + OPENSSL_cleanse(out, len); + goto err; + } + rv = len; + } + + err: + gctx->iv_set = 0; + gctx->tls_aad_len = -1; + return rv; +} + +static int wbsm4_wsise_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + + /* If not set up, return error */ + if (!gctx->key_set) + return -1; + + if (gctx->tls_aad_len >= 0) + return wbsm4_wsise_gcm_tls_cipher(ctx, out, in, len); + + if (!gctx->iv_set) + return -1; + + if (in != NULL) { + if (out == NULL) { + if (CRYPTO_gcm128_aad(&gctx->gcm, in, len)) + return -1; + } else if (ctx->encrypt) { + if 
(gctx->ctr != NULL) { + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + return -1; + } else { + if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, len)) + return -1; + } + } else { + if (gctx->ctr != NULL) { + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + return -1; + } else { + if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, len)) + return -1; + } + } + return len; + } else { + if (!ctx->encrypt) { + if (gctx->taglen < 0) + return -1; + if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0) + return -1; + gctx->iv_set = 0; + return 0; + } + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16); + gctx->taglen = 16; + /* Don't reuse the IV */ + gctx->iv_set = 0; + return 0; + } +} + +static int wbsm4_wsise_gcm_cleanup(EVP_CIPHER_CTX *c) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX, c); + const unsigned char *iv; + + if (gctx == NULL) + return 0; + + iv = EVP_CIPHER_CTX_iv(c); + if (iv != gctx->iv) + OPENSSL_free(gctx->iv); + + OPENSSL_cleanse(gctx, sizeof(*gctx)); + return 1; +} + +static int wbsm4_wsise_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,c); + + switch (type) { + case EVP_CTRL_INIT: + cctx->key_set = 0; + cctx->iv_set = 0; + cctx->L = 8; + cctx->M = 12; + cctx->tag_set = 0; + cctx->len_set = 0; + cctx->tls_aad_len = -1; + return 1; + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = 15 - cctx->L; + return 1; + case EVP_CTRL_AEAD_TLS1_AAD: + /* Save the AAD for later use */ + if (arg != EVP_AEAD_TLS1_AAD_LEN) + return 0; + memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg); + cctx->tls_aad_len = arg; + { + uint16_t len = + EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8 + | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1]; + + /* Correct length for explicit IV */ + if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN) + return 0; + len -= EVP_CCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ + if (!EVP_CIPHER_CTX_encrypting(c)) { + if (len < cctx->M) + 
return 0; + len -= cctx->M; + } + EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8; + EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff; + } + /* Extra padding: tag appended to record */ + return cctx->M; + + case EVP_CTRL_CCM_SET_IV_FIXED: + /* Sanity check length */ + if (arg != EVP_CCM_TLS_FIXED_IV_LEN) + return 0; + /* Just copy to first part of IV */ + memcpy(EVP_CIPHER_CTX_iv_noconst(c), ptr, arg); + return 1; + + case EVP_CTRL_AEAD_SET_IVLEN: + arg = 15 - arg; + /* fall thru */ + case EVP_CTRL_CCM_SET_L: + if (arg < 2 || arg > 8) + return 0; + cctx->L = arg; + return 1; + + case EVP_CTRL_AEAD_SET_TAG: + if ((arg & 1) || arg < 4 || arg > 16) + return 0; + if (EVP_CIPHER_CTX_encrypting(c) && ptr) + return 0; + if (ptr) { + cctx->tag_set = 1; + memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg); + } + cctx->M = arg; + return 1; + + case EVP_CTRL_AEAD_GET_TAG: + if (!EVP_CIPHER_CTX_encrypting(c) || !cctx->tag_set) + return 0; + if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg)) + return 0; + cctx->tag_set = 0; + cctx->iv_set = 0; + cctx->len_set = 0; + return 1; + + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_SM4_CCM_CTX *cctx_out = EVP_C_DATA(EVP_SM4_CCM_CTX,out); + + if (cctx->ccm.key) { + if (cctx->ccm.key != &cctx->ks) + return 0; + cctx_out->ccm.key = &cctx_out->ks; + } + return 1; + } + + default: + return -1; + + } +} + +static int wbsm4_wsise_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,ctx); + + if (iv == NULL && key == NULL) + return 1; + if (key != NULL) + do { + wbsm4_wsise_set_key(key, &cctx->ks); + CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L, + &cctx->ks, (block128_f)wbsm4_wsise_encrypt); + cctx->str = NULL; + cctx->key_set = 1; + } while (0); + if (iv != NULL) { + memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L); + cctx->iv_set = 1; + } + return 1; +} + +static int wbsm4_wsise_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned 
char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,ctx); + CCM128_CONTEXT *ccm = &cctx->ccm; + + /* If not set up, return error */ + if (!cctx->key_set) + return -1; + + /* EVP_*Final() doesn't return any data */ + if (in == NULL && out != NULL) + return 0; + + if (!cctx->iv_set) + return -1; + + if (out == NULL) { + if (in == NULL) { + if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), + 15 - cctx->L, len)) + return -1; + cctx->len_set = 1; + return len; + } + /* If have AAD need message length */ + if (!cctx->len_set && len) + return -1; + CRYPTO_ccm128_aad(ccm, in, len); + return len; + } + + /* The tag must be set before actually decrypting data */ + if (!EVP_CIPHER_CTX_encrypting(ctx) && !cctx->tag_set) + return -1; + + /* If not set length yet do it */ + if (!cctx->len_set) { + if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), + 15 - cctx->L, len)) + return -1; + cctx->len_set = 1; + } + if (EVP_CIPHER_CTX_encrypting(ctx)) { + if (cctx->str != NULL ? + CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len, + cctx->str) : + CRYPTO_ccm128_encrypt(ccm, in, out, len)) + return -1; + cctx->tag_set = 1; + return len; + } else { + int rv = -1; + + if (cctx->str != NULL ? 
!CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len, + cctx->str) : + !CRYPTO_ccm128_decrypt(ccm, in, out, len)) { + unsigned char tag[16]; + if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) { + if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx), + cctx->M)) + rv = len; + } + } + if (rv == -1) + OPENSSL_cleanse(out, len); + cctx->iv_set = 0; + cctx->tag_set = 0; + cctx->len_set = 0; + return rv; + } + +} + +static int wbsm4_wsise_ccm_cleanup(EVP_CIPHER_CTX *c) +{ + return 1; +} + +#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ + | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH) + +BLOCK_CIPHER_custom(NID_wbsm4_wsise, 1, 12, gcm, GCM, + EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS) +BLOCK_CIPHER_custom(NID_wbsm4_wsise, 1, 12, ccm, CCM, + EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS) +#endif diff --git a/crypto/evp/e_wbsm4_xiaolai.c b/crypto/evp/e_wbsm4_xiaolai.c new file mode 100644 index 000000000..0c1830e9e --- /dev/null +++ b/crypto/evp/e_wbsm4_xiaolai.c @@ -0,0 +1,749 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2017 Ribose Inc. All Rights Reserved. + * Ported from Ribose contributions from Botan. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include "internal/deprecated.h" + +#include "internal/cryptlib.h" +#ifndef OPENSSL_NO_WBSM4 +# include <openssl/rand.h> +# include "crypto/wbsm4.h" +# include "crypto/evp.h" +# include "crypto/modes.h" +# include "evp_local.h" + +typedef struct { + union { + OSSL_UNION_ALIGN; + wbsm4_xiaolai_key ks; + } ks; + block128_f block; +} EVP_WBSM4_XIAOLAI_KEY; + +# define BLOCK_CIPHER_generic(nid,blocksize,ivlen,nmode,mode,MODE,flags) \ +static const EVP_CIPHER wbsm4_xiaolai_##mode = { \ + nid##_##nmode,blocksize,sizeof(wbsm4_xiaolai_key),ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_xiaolai_init_key, \ + wbsm4_xiaolai_##mode##_cipher, \ + NULL, \ + sizeof(EVP_WBSM4_XIAOLAI_KEY), \ + NULL,NULL,NULL,NULL }; \ +const EVP_CIPHER *EVP_wbsm4_xiaolai_##mode(void) \ +{ return &wbsm4_xiaolai_##mode; } + +#define DEFINE_BLOCK_CIPHERS(nid,flags) \ + BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ctr,ctr,CTR,flags) + +static int wbsm4_xiaolai_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_WBSM4_XIAOLAI_KEY *dat = EVP_C_DATA(EVP_WBSM4_XIAOLAI_KEY,ctx); + + if (!enc) { + ERR_raise(ERR_LIB_EVP, EVP_R_BAD_DECRYPT); + return 0; + } + + dat->block = (block128_f)wbsm4_xiaolai_encrypt; + wbsm4_xiaolai_set_key(key, &dat->ks.ks); + + return 1; +} + +static int wbsm4_xiaolai_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_XIAOLAI_KEY *dat = EVP_C_DATA(EVP_WBSM4_XIAOLAI_KEY,ctx); + + CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, + 
dat->block); + + return 1; +} + +static int wbsm4_xiaolai_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_XIAOLAI_KEY *dat = EVP_C_DATA(EVP_WBSM4_XIAOLAI_KEY,ctx); + int num = EVP_CIPHER_CTX_get_num(ctx); + + CRYPTO_cfb128_encrypt(in, out, len, &dat->ks, + ctx->iv, &num, + EVP_CIPHER_CTX_is_encrypting(ctx), dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +static int wbsm4_xiaolai_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + size_t bl = EVP_CIPHER_CTX_get_block_size(ctx); + size_t i; + EVP_WBSM4_XIAOLAI_KEY *dat = EVP_C_DATA(EVP_WBSM4_XIAOLAI_KEY,ctx); + + if (len < bl) + return 1; + + for (i = 0, len -= bl; i <= len; i += bl) + (*dat->block) (in + i, out + i, &dat->ks); + + return 1; +} + +static int wbsm4_xiaolai_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_WBSM4_XIAOLAI_KEY *dat = EVP_C_DATA(EVP_WBSM4_XIAOLAI_KEY,ctx); + int num = EVP_CIPHER_CTX_get_num(ctx); + + CRYPTO_ofb128_encrypt(in, out, len, &dat->ks, + ctx->iv, &num, dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +static int wbsm4_xiaolai_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + int n = EVP_CIPHER_CTX_get_num(ctx); + unsigned int num; + EVP_WBSM4_XIAOLAI_KEY *dat = EVP_C_DATA(EVP_WBSM4_XIAOLAI_KEY,ctx); + + if (n < 0) + return 0; + num = (unsigned int)n; + + CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, + ctx->iv, + EVP_CIPHER_CTX_buf_noconst(ctx), &num, + dat->block); + EVP_CIPHER_CTX_set_num(ctx, num); + return 1; +} + +DEFINE_BLOCK_CIPHERS(NID_wbsm4_xiaolai, 0) + +# define BLOCK_CIPHER_custom(nid,blocksize,ivlen,mode,MODE,flags) \ +static const EVP_CIPHER wbsm4_xiaolai_##mode = { \ + nid##_##mode,blocksize, sizeof(wbsm4_xiaolai_key), ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_xiaolai_##mode##_init, \ + 
wbsm4_xiaolai_##mode##_cipher, \ + wbsm4_xiaolai_##mode##_cleanup, \ + sizeof(EVP_SM4_##MODE##_CTX), \ + NULL,NULL,wbsm4_xiaolai_##mode##_ctrl,NULL }; \ +const EVP_CIPHER *EVP_wbsm4_xiaolai_##mode(void) \ +{ return &wbsm4_xiaolai_##mode; } + +typedef struct { + wbsm4_xiaolai_key ks; /* WBSM4 key schedule to use */ + int key_set; /* Set if key initialized */ + int iv_set; /* Set if an iv is set */ + GCM128_CONTEXT gcm; + unsigned char *iv; /* Temporary IV store */ + int ivlen; /* IV length */ + int taglen; + int iv_gen; /* It is OK to generate IVs */ + int tls_aad_len; /* TLS AAD length */ + ctr128_f ctr; +} EVP_SM4_GCM_CTX; + +typedef struct { + wbsm4_xiaolai_key ks; /* WBSM4 key schedule to use */ + int key_set; /* Set if key initialized */ + int iv_set; /* Set if an iv is set */ + int tag_set; /* Set if tag is valid */ + int len_set; /* Set if message length set */ + int L, M; /* L and M parameters from RFC3610 */ + int tls_aad_len; /* TLS AAD length */ + CCM128_CONTEXT ccm; + ccm128_f str; +} EVP_SM4_CCM_CTX; + +static int wbsm4_xiaolai_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_xiaolai_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc); +static int wbsm4_xiaolai_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len); +static int wbsm4_xiaolai_gcm_cleanup(EVP_CIPHER_CTX *c); + +static int wbsm4_xiaolai_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_xiaolai_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc); +static int wbsm4_xiaolai_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len); +static int wbsm4_xiaolai_ccm_cleanup(EVP_CIPHER_CTX *c); + +/* increment counter (64-bit int) by 1 */ +static void ctr64_inc(unsigned char *counter) +{ + int n = 8; + unsigned char c; + + do { + --n; + c = counter[n]; + ++c; + counter[n] = c; + if (c) + return; + 
} while (n); +} + +static int wbsm4_xiaolai_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,c); + + switch (type) { + case EVP_CTRL_INIT: + gctx->key_set = 0; + gctx->iv_set = 0; + gctx->ivlen = EVP_CIPHER_iv_length(c->cipher); + gctx->iv = c->iv; + gctx->taglen = -1; + gctx->iv_gen = 0; + gctx->tls_aad_len = -1; + return 1; + + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = gctx->ivlen; + return 1; + + case EVP_CTRL_AEAD_SET_IVLEN: + if (arg <= 0) + return 0; + /* Allocate memory for IV if needed */ + if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) { + if (gctx->iv != c->iv) + OPENSSL_free(gctx->iv); + if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) + return 0; + } + gctx->ivlen = arg; + return 1; + + case EVP_CTRL_AEAD_SET_TAG: + if (arg <= 0 || arg > 16 || c->encrypt) + return 0; + memcpy(c->buf, ptr, arg); + gctx->taglen = arg; + return 1; + + case EVP_CTRL_AEAD_GET_TAG: + if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) + return 0; + memcpy(ptr, c->buf, arg); + return 1; + + case EVP_CTRL_GCM_SET_IV_FIXED: + /* Special case: -1 length restores whole IV */ + if (arg == -1) { + memcpy(gctx->iv, ptr, gctx->ivlen); + gctx->iv_gen = 1; + return 1; + } + /* + * Fixed field must be at least 4 bytes and invocation field at least + * 8. + */ + if ((arg < 4) || (gctx->ivlen - arg) < 8) + return 0; + if (arg) + memcpy(gctx->iv, ptr, arg); + if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0) + return 0; + gctx->iv_gen = 1; + return 1; + + case EVP_CTRL_GCM_IV_GEN: + if (gctx->iv_gen == 0 || gctx->key_set == 0) + return 0; + CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen); + if (arg <= 0 || arg > gctx->ivlen) + arg = gctx->ivlen; + memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); + /* + * Invocation field will be at least 8 bytes in size and so no need + * to check wrap around or increment more than last 8 bytes. 
+ */ + ctr64_inc(gctx->iv + gctx->ivlen - 8); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_GCM_SET_IV_INV: + if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) + return 0; + memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg); + CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_AEAD_TLS1_AAD: + /* Save the AAD for later use */ + if (arg != EVP_AEAD_TLS1_AAD_LEN) + return 0; + memcpy(c->buf, ptr, arg); + gctx->tls_aad_len = arg; + { + unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1]; + /* Correct length for explicit IV */ + if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN) + return 0; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ + if (!c->encrypt) { + if (len < EVP_GCM_TLS_TAG_LEN) + return 0; + len -= EVP_GCM_TLS_TAG_LEN; + } + c->buf[arg - 2] = len >> 8; + c->buf[arg - 1] = len & 0xff; + } + /* Extra padding: tag appended to record */ + return EVP_GCM_TLS_TAG_LEN; + + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_SM4_GCM_CTX *gctx_out = EVP_C_DATA(EVP_SM4_GCM_CTX,out); + + if (gctx->gcm.key) { + if (gctx->gcm.key != &gctx->ks) + return 0; + gctx_out->gcm.key = &gctx_out->ks; + } + if (gctx->iv == c->iv) + gctx_out->iv = out->iv; + else { + if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) { + return 0; + } + memcpy(gctx_out->iv, gctx->iv, gctx->ivlen); + } + return 1; + } + case EVP_CTRL_AEAD_SET_MAC_KEY: + /* no-op */ + return 1; + default: + return -1; + } + return 1; +} + +static int wbsm4_xiaolai_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + + if (iv == NULL && key == NULL) + return 1; + if (key) { + do { + wbsm4_xiaolai_set_key(key, &gctx->ks); + CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, + (block128_f)wbsm4_xiaolai_encrypt); + gctx->ctr = NULL; + } while (0); + + /* + * If we have an iv can set it directly, otherwise use saved IV. 
+ */ + if (iv == NULL && gctx->iv_set) + iv = gctx->iv; + if (iv) { + CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen); + gctx->iv_set = 1; + } + gctx->key_set = 1; + } else { + /* If key set use IV, otherwise copy */ + if (gctx->key_set) + CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen); + else + memcpy(gctx->iv, iv, gctx->ivlen); + gctx->iv_set = 1; + gctx->iv_gen = 0; + } + return 1; +} + +/* + * Handle TLS GCM packet format. This consists of the last portion of the IV + * followed by the payload and finally the tag. On encrypt generate IV, + * encrypt payload and write the tag. On verify retrieve IV, decrypt payload + * and verify tag. + */ + +static int wbsm4_xiaolai_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + int rv = -1; + /* Encrypt/decrypt must be performed in place */ + if (out != in + || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN)) + return -1; + /* + * Set IV from start of buffer or generate IV and write to start of + * buffer. + */ + if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? 
EVP_CTRL_GCM_IV_GEN + : EVP_CTRL_GCM_SET_IV_INV, + EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0) + goto err; + /* Use saved AAD */ + if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len)) + goto err; + /* Fix buffer and length to point to payload */ + in += EVP_GCM_TLS_EXPLICIT_IV_LEN; + out += EVP_GCM_TLS_EXPLICIT_IV_LEN; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN; + if (ctx->encrypt) { + /* Encrypt payload */ + if (gctx->ctr) { + size_t bulk = 0; + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, + in + bulk, + out + bulk, + len - bulk, gctx->ctr)) + goto err; + } else { + size_t bulk = 0; + if (CRYPTO_gcm128_encrypt(&gctx->gcm, + in + bulk, out + bulk, len - bulk)) + goto err; + } + out += len; + /* Finally write tag */ + CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN); + rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN; + } else { + /* Decrypt */ + if (gctx->ctr) { + size_t bulk = 0; + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, + in + bulk, + out + bulk, + len - bulk, gctx->ctr)) + goto err; + } else { + size_t bulk = 0; + if (CRYPTO_gcm128_decrypt(&gctx->gcm, + in + bulk, out + bulk, len - bulk)) + goto err; + } + /* Retrieve tag */ + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN); + /* If tag mismatch wipe buffer */ + if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) { + OPENSSL_cleanse(out, len); + goto err; + } + rv = len; + } + + err: + gctx->iv_set = 0; + gctx->tls_aad_len = -1; + return rv; +} + +static int wbsm4_xiaolai_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,ctx); + + /* If not set up, return error */ + if (!gctx->key_set) + return -1; + + if (gctx->tls_aad_len >= 0) + return wbsm4_xiaolai_gcm_tls_cipher(ctx, out, in, len); + + if (!gctx->iv_set) + return -1; + + if (in != NULL) { + if (out == NULL) { + if (CRYPTO_gcm128_aad(&gctx->gcm, in, len)) + return -1; + } else if (ctx->encrypt) { + 
if (gctx->ctr != NULL) { + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + return -1; + } else { + if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, len)) + return -1; + } + } else { + if (gctx->ctr != NULL) { + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + return -1; + } else { + if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, len)) + return -1; + } + } + return len; + } else { + if (!ctx->encrypt) { + if (gctx->taglen < 0) + return -1; + if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0) + return -1; + gctx->iv_set = 0; + return 0; + } + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16); + gctx->taglen = 16; + /* Don't reuse the IV */ + gctx->iv_set = 0; + return 0; + } +} + +static int wbsm4_xiaolai_gcm_cleanup(EVP_CIPHER_CTX *c) +{ + EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX, c); + const unsigned char *iv; + + if (gctx == NULL) + return 0; + + iv = EVP_CIPHER_CTX_iv(c); + if (iv != gctx->iv) + OPENSSL_free(gctx->iv); + + OPENSSL_cleanse(gctx, sizeof(*gctx)); + return 1; +} + +static int wbsm4_xiaolai_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,c); + + switch (type) { + case EVP_CTRL_INIT: + cctx->key_set = 0; + cctx->iv_set = 0; + cctx->L = 8; + cctx->M = 12; + cctx->tag_set = 0; + cctx->len_set = 0; + cctx->tls_aad_len = -1; + return 1; + case EVP_CTRL_GET_IVLEN: + *(int *)ptr = 15 - cctx->L; + return 1; + case EVP_CTRL_AEAD_TLS1_AAD: + /* Save the AAD for later use */ + if (arg != EVP_AEAD_TLS1_AAD_LEN) + return 0; + memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg); + cctx->tls_aad_len = arg; + { + uint16_t len = + EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8 + | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1]; + + /* Correct length for explicit IV */ + if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN) + return 0; + len -= EVP_CCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ + if (!EVP_CIPHER_CTX_encrypting(c)) { + if (len < 
cctx->M) + return 0; + len -= cctx->M; + } + EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8; + EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff; + } + /* Extra padding: tag appended to record */ + return cctx->M; + + case EVP_CTRL_CCM_SET_IV_FIXED: + /* Sanity check length */ + if (arg != EVP_CCM_TLS_FIXED_IV_LEN) + return 0; + /* Just copy to first part of IV */ + memcpy(EVP_CIPHER_CTX_iv_noconst(c), ptr, arg); + return 1; + + case EVP_CTRL_AEAD_SET_IVLEN: + arg = 15 - arg; + /* fall thru */ + case EVP_CTRL_CCM_SET_L: + if (arg < 2 || arg > 8) + return 0; + cctx->L = arg; + return 1; + + case EVP_CTRL_AEAD_SET_TAG: + if ((arg & 1) || arg < 4 || arg > 16) + return 0; + if (EVP_CIPHER_CTX_encrypting(c) && ptr) + return 0; + if (ptr) { + cctx->tag_set = 1; + memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg); + } + cctx->M = arg; + return 1; + + case EVP_CTRL_AEAD_GET_TAG: + if (!EVP_CIPHER_CTX_encrypting(c) || !cctx->tag_set) + return 0; + if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg)) + return 0; + cctx->tag_set = 0; + cctx->iv_set = 0; + cctx->len_set = 0; + return 1; + + case EVP_CTRL_COPY: + { + EVP_CIPHER_CTX *out = ptr; + EVP_SM4_CCM_CTX *cctx_out = EVP_C_DATA(EVP_SM4_CCM_CTX,out); + + if (cctx->ccm.key) { + if (cctx->ccm.key != &cctx->ks) + return 0; + cctx_out->ccm.key = &cctx_out->ks; + } + return 1; + } + + default: + return -1; + + } +} + +static int wbsm4_xiaolai_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, + const unsigned char *iv, int enc) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,ctx); + + if (iv == NULL && key == NULL) + return 1; + if (key != NULL) + do { + wbsm4_xiaolai_set_key(key, &cctx->ks); + CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L, + &cctx->ks, (block128_f)wbsm4_xiaolai_encrypt); + cctx->str = NULL; + cctx->key_set = 1; + } while (0); + if (iv != NULL) { + memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, 15 - cctx->L); + cctx->iv_set = 1; + } + return 1; +} + +static int 
wbsm4_xiaolai_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) +{ + EVP_SM4_CCM_CTX *cctx = EVP_C_DATA(EVP_SM4_CCM_CTX,ctx); + CCM128_CONTEXT *ccm = &cctx->ccm; + + /* If not set up, return error */ + if (!cctx->key_set) + return -1; + + /* EVP_*Final() doesn't return any data */ + if (in == NULL && out != NULL) + return 0; + + if (!cctx->iv_set) + return -1; + + if (out == NULL) { + if (in == NULL) { + if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), + 15 - cctx->L, len)) + return -1; + cctx->len_set = 1; + return len; + } + /* If have AAD need message length */ + if (!cctx->len_set && len) + return -1; + CRYPTO_ccm128_aad(ccm, in, len); + return len; + } + + /* The tag must be set before actually decrypting data */ + if (!EVP_CIPHER_CTX_encrypting(ctx) && !cctx->tag_set) + return -1; + + /* If not set length yet do it */ + if (!cctx->len_set) { + if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx), + 15 - cctx->L, len)) + return -1; + cctx->len_set = 1; + } + if (EVP_CIPHER_CTX_encrypting(ctx)) { + if (cctx->str != NULL ? + CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len, + cctx->str) : + CRYPTO_ccm128_encrypt(ccm, in, out, len)) + return -1; + cctx->tag_set = 1; + return len; + } else { + int rv = -1; + + if (cctx->str != NULL ? 
!CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len, + cctx->str) : + !CRYPTO_ccm128_decrypt(ccm, in, out, len)) { + unsigned char tag[16]; + if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) { + if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx), + cctx->M)) + rv = len; + } + } + if (rv == -1) + OPENSSL_cleanse(out, len); + cctx->iv_set = 0; + cctx->tag_set = 0; + cctx->len_set = 0; + return rv; + } + +} + +static int wbsm4_xiaolai_ccm_cleanup(EVP_CIPHER_CTX *c) +{ + return 1; +} + +#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ + | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH) + +BLOCK_CIPHER_custom(NID_wbsm4_xiaolai, 1, 12, gcm, GCM, + EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS) +BLOCK_CIPHER_custom(NID_wbsm4_xiaolai, 1, 12, ccm, CCM, + EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS) +#endif diff --git a/crypto/objects/obj_dat.h b/crypto/objects/obj_dat.h index 0228fd0b7..596644691 100644 --- a/crypto/objects/obj_dat.h +++ b/crypto/objects/obj_dat.h @@ -933,7 +933,7 @@ static const unsigned char so[6628] = { 0x60,0x86,0x48,0x01,0x86,0xF9,0x66,0xAD,0xCA,0x7B,0x01,0x01, /* [ 6615] OBJ_oracle_jdk_trustedkeyusage */ }; -#define NUM_NID 1260 +#define NUM_NID 1281 static const ASN1_OBJECT nid_objs[NUM_NID] = { {"UNDEF", "undefined", NID_undef}, {"rsadsi", "RSA Data Security, Inc.", NID_rsadsi, 6, &so[0]}, @@ -2195,9 +2195,30 @@ static const ASN1_OBJECT nid_objs[NUM_NID] = { {"hmacWithSM3", "hmacWithSM3", NID_hmacWithSM3, 10, &so[6598]}, {"oracle-organization", "Oracle organization", NID_oracle, 7, &so[6608]}, {"oracle-jdk-trustedkeyusage", "Trusted key usage (Oracle)", NID_oracle_jdk_trustedkeyusage, 12, &so[6615]}, + {"WBSM4-XIAOLAI-ECB", "wbsm4-xiaolai-ecb", NID_wbsm4_xiaolai_ecb}, + {"WBSM4-XIAOLAI-CBC", "wbsm4-xiaolai-cbc", NID_wbsm4_xiaolai_cbc}, + {"WBSM4-XIAOLAI-OFB", "wbsm4-xiaolai-ofb", NID_wbsm4_xiaolai_ofb128}, + {"WBSM4-XIAOLAI-CFB", "wbsm4-xiaolai-cfb", 
NID_wbsm4_xiaolai_cfb128}, + {"WBSM4-XIAOLAI-CTR", "wbsm4-xiaolai-ctr", NID_wbsm4_xiaolai_ctr}, + {"WBSM4-XIAOLAI-GCM", "wbsm4-xiaolai-gcm", NID_wbsm4_xiaolai_gcm}, + {"WBSM4-XIAOLAI-CCM", "wbsm4-xiaolai-ccm", NID_wbsm4_xiaolai_ccm}, + {"WBSM4-BAIWU-ECB", "wbsm4-baiwu-ecb", NID_wbsm4_baiwu_ecb}, + {"WBSM4-BAIWU-CBC", "wbsm4-baiwu-cbc", NID_wbsm4_baiwu_cbc}, + {"WBSM4-BAIWU-CTR", "wbsm4-baiwu-ctr", NID_wbsm4_baiwu_ctr}, + {"WBSM4-BAIWU-GCM", "wbsm4-baiwu-gcm", NID_wbsm4_baiwu_gcm}, + {"WBSM4-BAIWU-CCM", "wbsm4-baiwu-ccm", NID_wbsm4_baiwu_ccm}, + {"WBSM4-WSISE-ECB", "wbsm4-wsise-ecb", NID_wbsm4_wsise_ecb}, + {"WBSM4-WSISE-CBC", "wbsm4-wsise-cbc", NID_wbsm4_wsise_cbc}, + {"WBSM4-WSISE-CTR", "wbsm4-wsise-ctr", NID_wbsm4_wsise_ctr}, + {"WBSM4-WSISE-GCM", "wbsm4-wsise-gcm", NID_wbsm4_wsise_gcm}, + {"WBSM4-WSISE-CCM", "wbsm4-wsise-ccm", NID_wbsm4_wsise_ccm}, + {"WBSM4-BAIWU-OFB", "wbsm4-baiwu-ofb", NID_wbsm4_baiwu_ofb128}, + {"WBSM4-BAIWU-CFB", "wbsm4-baiwu-cfb", NID_wbsm4_baiwu_cfb128}, + {"WBSM4-WSISE-OFB", "wbsm4-wsise-ofb", NID_wbsm4_wsise_ofb128}, + {"WBSM4-WSISE-CFB", "wbsm4-wsise-cfb", NID_wbsm4_wsise_cfb128}, }; -#define NUM_SN 1010 +#define NUM_SN 1031 static const unsigned int sn_objs[NUM_SN] = { 364, /* "AD_DVCS" */ 419, /* "AES-128-CBC" */ @@ -2393,6 +2414,27 @@ static const unsigned int sn_objs[NUM_SN] = { 1021, /* "TLS1-PRF" */ 458, /* "UID" */ 0, /* "UNDEF" */ + 1268, /* "WBSM4-BAIWU-CBC" */ + 1271, /* "WBSM4-BAIWU-CCM" */ + 1278, /* "WBSM4-BAIWU-CFB" */ + 1269, /* "WBSM4-BAIWU-CTR" */ + 1267, /* "WBSM4-BAIWU-ECB" */ + 1270, /* "WBSM4-BAIWU-GCM" */ + 1277, /* "WBSM4-BAIWU-OFB" */ + 1273, /* "WBSM4-WSISE-CBC" */ + 1276, /* "WBSM4-WSISE-CCM" */ + 1280, /* "WBSM4-WSISE-CFB" */ + 1274, /* "WBSM4-WSISE-CTR" */ + 1272, /* "WBSM4-WSISE-ECB" */ + 1275, /* "WBSM4-WSISE-GCM" */ + 1279, /* "WBSM4-WSISE-OFB" */ + 1261, /* "WBSM4-XIAOLAI-CBC" */ + 1266, /* "WBSM4-XIAOLAI-CCM" */ + 1263, /* "WBSM4-XIAOLAI-CFB" */ + 1264, /* "WBSM4-XIAOLAI-CTR" */ + 1260, /* 
"WBSM4-XIAOLAI-ECB" */ + 1265, /* "WBSM4-XIAOLAI-GCM" */ + 1262, /* "WBSM4-XIAOLAI-OFB" */ 1034, /* "X25519" */ 1035, /* "X448" */ 11, /* "X500" */ @@ -3211,7 +3253,7 @@ static const unsigned int sn_objs[NUM_SN] = { 1093, /* "x509ExtAdmission" */ }; -#define NUM_LN 1010 +#define NUM_LN 1031 static const unsigned int ln_objs[NUM_LN] = { 363, /* "AD Time Stamping" */ 405, /* "ANSI X9.62" */ @@ -4213,6 +4255,27 @@ static const unsigned int ln_objs[NUM_LN] = { 740, /* "wap-wsg-idm-ecid-wtls7" */ 741, /* "wap-wsg-idm-ecid-wtls8" */ 742, /* "wap-wsg-idm-ecid-wtls9" */ + 1268, /* "wbsm4-baiwu-cbc" */ + 1271, /* "wbsm4-baiwu-ccm" */ + 1278, /* "wbsm4-baiwu-cfb" */ + 1269, /* "wbsm4-baiwu-ctr" */ + 1267, /* "wbsm4-baiwu-ecb" */ + 1270, /* "wbsm4-baiwu-gcm" */ + 1277, /* "wbsm4-baiwu-ofb" */ + 1273, /* "wbsm4-wsise-cbc" */ + 1276, /* "wbsm4-wsise-ccm" */ + 1280, /* "wbsm4-wsise-cfb" */ + 1274, /* "wbsm4-wsise-ctr" */ + 1272, /* "wbsm4-wsise-ecb" */ + 1275, /* "wbsm4-wsise-gcm" */ + 1279, /* "wbsm4-wsise-ofb" */ + 1261, /* "wbsm4-xiaolai-cbc" */ + 1266, /* "wbsm4-xiaolai-ccm" */ + 1263, /* "wbsm4-xiaolai-cfb" */ + 1264, /* "wbsm4-xiaolai-ctr" */ + 1260, /* "wbsm4-xiaolai-ecb" */ + 1265, /* "wbsm4-xiaolai-gcm" */ + 1262, /* "wbsm4-xiaolai-ofb" */ 868, /* "x121Address" */ 503, /* "x500UniqueIdentifier" */ 158, /* "x509Certificate" */ diff --git a/crypto/objects/obj_mac.num b/crypto/objects/obj_mac.num index 5e222b3f7..087fd208b 100644 --- a/crypto/objects/obj_mac.num +++ b/crypto/objects/obj_mac.num @@ -1027,3 +1027,24 @@ delegation_usage 1256 hmacWithSM3 1257 oracle 1258 oracle_jdk_trustedkeyusage 1259 +wbsm4_xiaolai_ecb 1260 +wbsm4_xiaolai_cbc 1261 +wbsm4_xiaolai_ofb128 1262 +wbsm4_xiaolai_cfb128 1263 +wbsm4_xiaolai_ctr 1264 +wbsm4_xiaolai_gcm 1265 +wbsm4_xiaolai_ccm 1266 +wbsm4_baiwu_ecb 1267 +wbsm4_baiwu_cbc 1268 +wbsm4_baiwu_ctr 1269 +wbsm4_baiwu_gcm 1270 +wbsm4_baiwu_ccm 1271 +wbsm4_wsise_ecb 1272 +wbsm4_wsise_cbc 1273 +wbsm4_wsise_ctr 1274 +wbsm4_wsise_gcm 1275 
+wbsm4_wsise_ccm 1276 +wbsm4_baiwu_ofb128 1277 +wbsm4_baiwu_cfb128 1278 +wbsm4_wsise_ofb128 1279 +wbsm4_wsise_cfb128 1280 diff --git a/crypto/objects/objects.txt b/crypto/objects/objects.txt index 9aede25bd..dbd7941ad 100644 --- a/crypto/objects/objects.txt +++ b/crypto/objects/objects.txt @@ -1416,3 +1416,34 @@ dstu4145le 2 9 : uacurve9 : DSTU curve 9 joint-iso-itu-t 16 840 1 113894 : oracle-organization : Oracle organization # Jdk trustedKeyUsage attribute oracle 746875 1 1 : oracle-jdk-trustedkeyusage : Trusted key usage (Oracle) + +# Definitions for WBSM4 cipher + : WBSM4-XIAOLAI-ECB : wbsm4-xiaolai-ecb + : WBSM4-XIAOLAI-CBC : wbsm4-xiaolai-cbc +!Cname wbsm4-xiaolai-ofb128 + : WBSM4-XIAOLAI-OFB : wbsm4-xiaolai-ofb +!Cname wbsm4-xiaolai-cfb128 + : WBSM4-XIAOLAI-CFB : wbsm4-xiaolai-cfb + : WBSM4-XIAOLAI-CTR : wbsm4-xiaolai-ctr + : WBSM4-XIAOLAI-GCM : wbsm4-xiaolai-gcm + : WBSM4-XIAOLAI-CCM : wbsm4-xiaolai-ccm + + : WBSM4-BAIWU-ECB : wbsm4-baiwu-ecb + : WBSM4-BAIWU-CBC : wbsm4-baiwu-cbc +!Cname wbsm4-baiwu-ofb128 + : WBSM4-BAIWU-OFB : wbsm4-baiwu-ofb +!Cname wbsm4-baiwu-cfb128 + : WBSM4-BAIWU-CFB : wbsm4-baiwu-cfb + : WBSM4-BAIWU-CTR : wbsm4-baiwu-ctr + : WBSM4-BAIWU-GCM : wbsm4-baiwu-gcm + : WBSM4-BAIWU-CCM : wbsm4-baiwu-ccm + + : WBSM4-WSISE-ECB : wbsm4-wsise-ecb + : WBSM4-WSISE-CBC : wbsm4-wsise-cbc +!Cname wbsm4-wsise-ofb128 + : WBSM4-WSISE-OFB : wbsm4-wsise-ofb +!Cname wbsm4-wsise-cfb128 + : WBSM4-WSISE-CFB : wbsm4-wsise-cfb + : WBSM4-WSISE-CTR : wbsm4-wsise-ctr + : WBSM4-WSISE-GCM : wbsm4-wsise-gcm + : WBSM4-WSISE-CCM : wbsm4-wsise-ccm diff --git a/crypto/sm4/build.info b/crypto/sm4/build.info index e27aa49e6..33fa0e3e5 100644 --- a/crypto/sm4/build.info +++ b/crypto/sm4/build.info @@ -14,6 +14,15 @@ ENDIF SOURCE[../../libcrypto]= $SM4ASM sm4.c +IF[{- !$disabled{wbsm4} -}] +SOURCE[../../libcrypto]= ${SOURCE[../../libcrypto]} \ + wb/wbsm4.c \ + wb/Bai-Wu-wbsm4.c \ + wb/Xiao-Lai-wbsm4.c \ + wb/WSISE-wbsm4.c \ + wb/WBMatrix.c +ENDIF + # Implementations are now 
spread across several libraries, so the defines # need to be applied to all affected libraries and modules. diff --git a/crypto/sm4/wb/Bai-Wu-wbsm4.c b/crypto/sm4/wb/Bai-Wu-wbsm4.c new file mode 100644 index 000000000..e211c46f6 --- /dev/null +++ b/crypto/sm4/wb/Bai-Wu-wbsm4.c @@ -0,0 +1,336 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF/Bai-Wu-White-box-SM4. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include +#include +#include "crypto/wbsm4.h" +#include "WBMatrix.h" + +#define GET32(pc) ( \ + ((uint32_t)(pc)[0] << 24) ^ \ + ((uint32_t)(pc)[1] << 16) ^ \ + ((uint32_t)(pc)[2] << 8) ^ \ + ((uint32_t)(pc)[3])) + +#define PUT32(st, ct) \ + (ct)[0] = (uint8_t)((st) >> 24); \ + (ct)[1] = (uint8_t)((st) >> 16); \ + (ct)[2] = (uint8_t)((st) >> 8); \ + (ct)[3] = (uint8_t)(st) + +static uint8_t SBOX[256]={ + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 
0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, + 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, +}; + +static M32 L_matrix = { + .M[0] = 0xA0202080, + .M[1] = 0x50101040, + .M[2] = 0x28080820, + .M[3] = 0x14040410, + .M[4] = 0xA020208, + .M[5] = 0x5010104, + .M[6] = 0x2808082, + .M[7] = 0x1404041, + .M[8] = 0x80A02020, + .M[9] = 0x40501010, + .M[10] = 0x20280808, + .M[11] = 0x10140404, + .M[12] = 0x80A0202, + .M[13] = 0x4050101, + .M[14] = 0x82028080, + .M[15] = 0x41014040, + .M[16] = 0x2080A020, + .M[17] = 0x10405010, + .M[18] = 0x8202808, + .M[19] = 0x4101404, + .M[20] = 0x2080A02, + .M[21] = 0x1040501, + .M[22] = 0x80820280, + .M[23] = 0x40410140, + .M[24] = 0x202080A0, + .M[25] = 0x10104050, + .M[26] = 0x8082028, + .M[27] = 0x4041014, + .M[28] = 0x202080A, + .M[29] = 0x1010405, + .M[30] = 0x80808202, + .M[31] = 0x40404101 +}; + +void wbsm4_baiwu_set_key(const uint8_t *key, wbsm4_baiwu_key *wbsm4_key) +{ + DECLARE_IS_ENDIAN; + + *wbsm4_key = *(wbsm4_baiwu_key *)key; + if (IS_LITTLE_ENDIAN) + return; + + uint8_t *p = (uint8_t *)wbsm4_key; + uint8_t *end = p + sizeof(wbsm4_baiwu_key); + while (p < end) + { + uint8_t t; + t = p[0]; + p[0] = p[3]; + p[3] = t; + + t = p[1]; + p[1] = p[2]; + p[2] = t; + + p += 4; + } +} + +void wbsm4_baiwu_export_key(const wbsm4_baiwu_key *wbsm4_key, uint8_t *key) +{ + 
DECLARE_IS_ENDIAN; + + wbsm4_baiwu_key *out = (wbsm4_baiwu_key *)key; + *out = *wbsm4_key; + if (IS_LITTLE_ENDIAN) + return; + + uint8_t *p = (uint8_t *)out; + uint8_t *end = p + sizeof(wbsm4_baiwu_key); + while (p < end) + { + uint8_t t; + t = p[0]; + p[0] = p[3]; + p[3] = t; + + t = p[1]; + p[1] = p[2]; + p[2] = t; + + p += 4; + } +} + +void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key) +{ + int i, j, r, x, y; + uint8_t temp_u8_x, temp_u8_y, temp_u8; + uint32_t temp_u32; + uint32_t TD0_u32[6], TD1_u32[6], TD2_u32[3], TR_u32[4], Lc[36], Ec0[32], Ec1[32]; + M32 L[36]; + M32 L_inv[36]; + M8 E[32][2][4]; + M8 E_inv[32][2][4]; + M32 Ei_inv[32][2]; + M32 M[32][2][3]; + M32 C[32]; + M32 LL; + + uint32_t SK[32]; + wbsm4_sm4_setkey(SK, sm4_key); + + for (r = 0; r < 36; r++) + { + genMatpairM32(&L[r], &L_inv[r]); + Lc[r] = cus_random(); + } + + for (r = 0; r < 32; r++) + { + for (j = 0; j < 4; j++) + { + genMatpairM8(&E[r][0][j], &E_inv[r][0][j]); + genMatpairM8(&E[r][1][j], &E_inv[r][1][j]); + } + + MatrixcomM8to32(E_inv[r][0][0], E_inv[r][0][1], E_inv[r][0][2], E_inv[r][0][3], &Ei_inv[r][0]); + MatrixcomM8to32(E_inv[r][1][0], E_inv[r][1][1], E_inv[r][1][2], E_inv[r][1][3], &Ei_inv[r][1]); + + MatMulMatM32(Ei_inv[r][0], L_inv[r + 1], &M[r][0][0]); + MatMulMatM32(Ei_inv[r][0], L_inv[r + 2], &M[r][0][1]); + MatMulMatM32(Ei_inv[r][0], L_inv[r + 3], &M[r][0][2]); + + MatMulMatM32(Ei_inv[r][1], L_inv[r + 1], &M[r][1][0]); + MatMulMatM32(Ei_inv[r][1], L_inv[r + 2], &M[r][1][1]); + MatMulMatM32(Ei_inv[r][1], L_inv[r + 3], &M[r][1][2]); + + MatMulMatM32(L[r + 4], L_inv[r], &C[r]); + } + + for (r = 0; r < 32; r++) + { + MatMulMatM32(L[r + 4], L_matrix, &LL); + + for (i = 0; i < 6; i++) + { + TD0_u32[i] = cus_random(); + TD1_u32[i] = cus_random(); + } + for (i = 0; i < 4; i++) + { + TR_u32[i] = cus_random(); + } + for (i = 0; i < 3; i++) + { + TD2_u32[i] = cus_random(); + } + Ec0[r] = TD0_u32[0] ^ TD0_u32[1] ^ TD0_u32[2] ^ TD0_u32[3] ^ TD0_u32[4] ^ TD0_u32[5]; 
+ Ec1[r] = TD1_u32[0] ^ TD1_u32[1] ^ TD1_u32[2] ^ TD1_u32[3] ^ TD1_u32[4] ^ TD1_u32[5]; + + for (x = 0; x < 256; x++) + { + for (j = 0; j < 4; j++) + { + temp_u8 = x ^ ((Lc[r] >> (24 - j * 8)) & 0xff); + temp_u32 = temp_u8 << (24 - j * 8); + wbsm4_key->TD[r][0][j][x] = MatMulNumM32(C[r], temp_u32); + } + for (j = 0; j < 3; j++) + { + wbsm4_key->TD[r][0][j][x] ^= TD2_u32[j]; + } + wbsm4_key->TD[r][0][3][x] ^= Lc[r + 4] ^ TD2_u32[0] ^ TD2_u32[1] ^ TD2_u32[2] ^ TR_u32[0] ^ TR_u32[1] ^ TR_u32[2] ^ TR_u32[3]; + + for (i = 1; i < 4; i++) + { + temp_u8 = x ^ ((Lc[r + i] >> 24) & 0xff); + temp_u32 = temp_u8 << 24; + wbsm4_key->TD[r][i][0][x] = MatMulNumM32(M[r][0][i - 1], temp_u32); + temp_u8 = x ^ ((Lc[r + i] >> 16) & 0xff); + temp_u32 = temp_u8 << 16; + wbsm4_key->TD[r][i][1][x] = MatMulNumM32(M[r][0][i - 1], temp_u32); + + temp_u8 = x ^ ((Lc[r + i] >> 8) & 0xff); + temp_u32 = temp_u8 << 8; + wbsm4_key->TD[r][i][2][x] = MatMulNumM32(M[r][1][i - 1], temp_u32); + temp_u8 = x ^ (Lc[r + i] & 0xff); + temp_u32 = temp_u8; + wbsm4_key->TD[r][i][3][x] = MatMulNumM32(M[r][1][i - 1], temp_u32); + } + + j = 0; + for (i = 1; i < 4; i++) + { + wbsm4_key->TD[r][i][0][x] ^= TD0_u32[j++]; + wbsm4_key->TD[r][i][1][x] ^= TD0_u32[j++]; + } + + j = 0; + for (i = 1; i < 4; i++) + { + wbsm4_key->TD[r][i][2][x] ^= TD1_u32[j++]; + wbsm4_key->TD[r][i][3][x] ^= TD1_u32[j++]; + } + } + + for (x = 0; x < 256; x++) + { + for (y = 0; y < 256; y++) + { + for (j = 0; j < 4; j++) + { + temp_u8_x = x ^ ((Ec0[r] >> (24 - j * 8)) & 0xff); + temp_u8_x = MatMulNumM8(E[r][0][j], temp_u8_x); + + temp_u8_y = y ^ ((Ec1[r] >> (24 - j * 8)) & 0xff); + temp_u8_y = MatMulNumM8(E[r][1][j], temp_u8_y); + temp_u8 = SBOX[temp_u8_x ^ temp_u8_y ^ ((SK[r] >> (24 - j * 8)) & 0xff)]; + temp_u32 = temp_u8 << (24 - j * 8); + wbsm4_key->TR[r][j][x][y] = MatMulNumM32(LL, temp_u32); + wbsm4_key->TR[r][j][x][y] ^= TR_u32[j]; + } + } + } + } + + // external encoding + for (i = 0; i < 4; i++) + { + wbsm4_key->SE[i].Mat = L[i]; + 
wbsm4_key->SE[i].Vec.V = Lc[i]; + + wbsm4_key->FE[i].Mat = L_inv[35 - i]; + wbsm4_key->FE[i].Vec.V = MatMulNumM32(L_inv[35 - i], Lc[35 - i]); + } +} + +void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_baiwu_key *wbsm4_key) +{ + int r, i, j; + uint32_t x[36]; + uint32_t s0, s1; + + x[0] = GET32(IN); + x[1] = GET32(IN + 4); + x[2] = GET32(IN + 8); + x[3] = GET32(IN + 12); + x[0] = affineU32(wbsm4_key->SE[0], x[0]); + x[1] = affineU32(wbsm4_key->SE[1], x[1]); + x[2] = affineU32(wbsm4_key->SE[2], x[2]); + x[3] = affineU32(wbsm4_key->SE[3], x[3]); + + for (r = 0; r < 32; r++) + { + x[r + 4] = 0; + s0 = 0; + s1 = 0; + + for (i = 1; i < 4; i++) + { + s0 ^= wbsm4_key->TD[r][i][0][(x[r + i] >> 24) & 0xff]; + s0 ^= wbsm4_key->TD[r][i][1][(x[r + i] >> 16) & 0xff]; + s1 ^= wbsm4_key->TD[r][i][2][(x[r + i] >> 8) & 0xff]; + s1 ^= wbsm4_key->TD[r][i][3][x[r + i] & 0xff]; + } + for (j = 0; j < 4; j++) + { + x[r + 4] ^= wbsm4_key->TR[r][j][(s0 >> (24 - j * 8)) & 0xff][(s1 >> (24 - j * 8)) & 0xff]; + x[r + 4] ^= wbsm4_key->TD[r][0][j][(x[r] >> (24 - j * 8)) & 0xff]; + } + } + + x[35] = affineU32(wbsm4_key->FE[0], x[35]); + x[34] = affineU32(wbsm4_key->FE[1], x[34]); + x[33] = affineU32(wbsm4_key->FE[2], x[33]); + x[32] = affineU32(wbsm4_key->FE[3], x[32]); + PUT32(x[35], OUT); + PUT32(x[34], OUT + 4); + PUT32(x[33], OUT + 8); + PUT32(x[32], OUT + 12); +} \ No newline at end of file diff --git a/crypto/sm4/wb/WBMatrix.c b/crypto/sm4/wb/WBMatrix.c new file mode 100644 index 000000000..cd840b2e4 --- /dev/null +++ b/crypto/sm4/wb/WBMatrix.c @@ -0,0 +1,5364 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF/WBMatrix. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include +#include "WBMatrix.h" + +// 8bit internal xor table +static int xor [] = {0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, + 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, + 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0}; + +// 8bit Hamming weight table +static int HW[] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, + 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, + 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, + 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, + 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, + 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, + 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, + 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, + 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, + 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, + 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, + 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, + 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}; + +static uint8_t idM4[4] = {0x08, 0x04, 0x02, 0x01}; +static uint8_t idM8[8] = {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 
0x02, 0x01}; +static uint16_t idM16[16] = {0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; +static uint32_t idM32[32] = {0x80000000, 0x40000000, 0x20000000, 0x10000000, 0x8000000, 0x4000000, 0x2000000, 0x1000000, 0x800000, 0x400000, 0x200000, 0x100000, 0x80000, 0x40000, 0x20000, 0x10000, 0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; +static uint64_t idM64[64] = {0x8000000000000000, 0x4000000000000000, 0x2000000000000000, 0x1000000000000000, 0x800000000000000, 0x400000000000000, 0x200000000000000, 0x100000000000000, 0x80000000000000, 0x40000000000000, 0x20000000000000, 0x10000000000000, 0x8000000000000, 0x4000000000000, 0x2000000000000, 0x1000000000000, 0x800000000000, 0x400000000000, 0x200000000000, 0x100000000000, 0x80000000000, 0x40000000000, 0x20000000000, 0x10000000000, 0x8000000000, 0x4000000000, 0x2000000000, 0x1000000000, 0x800000000, 0x400000000, 0x200000000, 0x100000000, + 0x80000000, 0x40000000, 0x20000000, 0x10000000, 0x8000000, 0x4000000, 0x2000000, 0x1000000, 0x800000, 0x400000, 0x200000, 0x100000, 0x80000, 0x40000, 0x20000, 0x10000, 0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; + +void initM4(M4 *Mat) // initial Matrix 4*4 +{ + int i; + for (i = 0; i < 4; i++) + { + (*Mat).M[i] = 0; + } +} +void initM8(M8 *Mat) // initial Matrix 8*8 +{ + int i; + for (i = 0; i < 8; i++) + { + (*Mat).M[i] = 0; + } +} +void initM16(M16 *Mat) // initial Matrix 16*16 +{ + int i; + for (i = 0; i < 16; i++) + { + (*Mat).M[i] = 0; + } +} +void initM32(M32 *Mat) // initial Matrix 32*32 +{ + int i; + for (i = 0; i < 32; i++) + { + (*Mat).M[i] = 0; + } +} +void initM64(M64 *Mat) // initial Matrix 64*64 +{ + int i; + for (i = 0; i < 64; i++) + { + (*Mat).M[i] = 0; + } +} +void initM128(M128 *Mat) // initial Matrix 128*128 +{ + int i; + for (i = 0; i < 128; i++) + { + (*Mat).M[i][0] = 0; + (*Mat).M[i][1] = 
0; + } +} +void initM256(M256 *Mat) // initial Matrix 256*256 +{ + int i; + for (i = 0; i < 256; i++) + { + (*Mat).M[i][0] = 0; + (*Mat).M[i][1] = 0; + (*Mat).M[i][2] = 0; + (*Mat).M[i][3] = 0; + } +} +void initV4(V4 *Vec) // initial Vector 4*1 +{ + (*Vec).V = 0; +} +void initV8(V8 *Vec) // initial Vector 8*1 +{ + (*Vec).V = 0; +} +void initV16(V16 *Vec) // initial Vector 16*1 +{ + (*Vec).V = 0; +} +void initV32(V32 *Vec) // initial Vector 32*1 +{ + (*Vec).V = 0; +} +void initV64(V64 *Vec) // initial Vector 64*1 +{ + (*Vec).V = 0; +} +void initV128(V128 *Vec) // initial Vector 128*1 +{ + (*Vec).V[0] = 0; + (*Vec).V[1] = 0; +} +void initV256(V256 *Vec) // initial Vector 256*1 +{ + (*Vec).V[0] = 0; + (*Vec).V[1] = 0; + (*Vec).V[2] = 0; + (*Vec).V[3] = 0; +} +void randM4(M4 *Mat) // randomize Matrix 4*4 +{ + int i; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 4; i++) + { + (*Mat).M[i] = cus_random() & 0x0f; + } +} +void randM8(M8 *Mat) // randomize Matrix 8*8 +{ + int i; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 8; i++) + { + (*Mat).M[i] = cus_random(); + } +} +void randM16(M16 *Mat) // randomize Matrix 16*16 +{ + int i; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 16; i++) + { + (*Mat).M[i] = cus_random(); + } +} +void randM32(M32 *Mat) // randomize Matrix 32*32 +{ + int i; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 32; i++) + { + (*Mat).M[i] = cus_random(); + } +} +void randM64(M64 *Mat) // randomize Matrix 64*64 +{ + int i; + uint32_t *m; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 64; i++) + { + m = (uint32_t *)&((*Mat).M[i]); + *(m + 1) = cus_random(); + *m = cus_random(); + } +} +void randM128(M128 *Mat) // randomize Matrix 128*128 +{ + int i; + uint32_t *m; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 128; i++) + { + m = (uint32_t *)&((*Mat).M[i][0]); + *(m + 
1) = cus_random(); + *m = cus_random(); + m = (uint32_t *)&((*Mat).M[i][1]); + *(m + 1) = cus_random(); + *m = cus_random(); + } +} +void randM256(M256 *Mat) // randomize Matrix 256*256 +{ + int i; + uint32_t *m; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + for (i = 0; i < 256; i++) + { + m = (uint32_t *)&((*Mat).M[i][0]); + *(m + 1) = cus_random(); + *m = cus_random(); + m = (uint32_t *)&((*Mat).M[i][1]); + *(m + 1) = cus_random(); + *m = cus_random(); + m = (uint32_t *)&((*Mat).M[i][2]); + *(m + 1) = cus_random(); + *m = cus_random(); + m = (uint32_t *)&((*Mat).M[i][3]); + *(m + 1) = cus_random(); + *m = cus_random(); + } +} +void identityM4(M4 *Mat) // identity matrix 4*4 +{ + int i; + for (i = 0; i < 4; i++) + { + (*Mat).M[i] = idM4[i]; + } +} +void identityM8(M8 *Mat) // identity matrix 8*8 +{ + int i; + for (i = 0; i < 8; i++) + { + (*Mat).M[i] = idM8[i]; + } +} +void identityM16(M16 *Mat) // identity matrix 16*16 +{ + int i; + for (i = 0; i < 16; i++) + { + (*Mat).M[i] = idM16[i]; + } +} +void identityM32(M32 *Mat) // identity matrix 32*32 +{ + int i; + for (i = 0; i < 32; i++) + { + (*Mat).M[i] = idM32[i]; + } +} +void identityM64(M64 *Mat) // identity matrix 64*64 +{ + int i; + for (i = 0; i < 64; i++) + { + (*Mat).M[i] = idM64[i]; + } +} +void identityM128(M128 *Mat) // identity matrix 128*128 +{ + int i; + for (i = 0; i < 64; i++) + { + (*Mat).M[i][0] = idM64[i]; + (*Mat).M[i][1] = 0; + } + for (i = 64; i < 128; i++) + { + (*Mat).M[i][0] = 0; + (*Mat).M[i][1] = idM64[i - 64]; + } +} +void identityM256(M256 *Mat) // identity matrix 256*256 +{ + int i; + for (i = 0; i < 64; i++) + { + (*Mat).M[i][0] = idM64[i]; + (*Mat).M[i][1] = 0; + (*Mat).M[i][2] = 0; + (*Mat).M[i][3] = 0; + } + for (i = 64; i < 128; i++) + { + (*Mat).M[i][0] = 0; + (*Mat).M[i][1] = idM64[i - 64]; + (*Mat).M[i][2] = 0; + (*Mat).M[i][3] = 0; + } + for (i = 128; i < 192; i++) + { + (*Mat).M[i][0] = 0; + (*Mat).M[i][1] = 0; + (*Mat).M[i][2] = idM64[i - 128]; + 
(*Mat).M[i][3] = 0; + } + for (i = 192; i < 256; i++) + { + (*Mat).M[i][0] = 0; + (*Mat).M[i][1] = 0; + (*Mat).M[i][2] = 0; + (*Mat).M[i][3] = idM64[i - 192]; + } +} +void randV4(V4 *Vec) // randomize Vector 4*1 +{ + // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + (*Vec).V = cus_random() & 0x0f; +} +void randV8(V8 *Vec) // randomize Vector 8*1 +{ + // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + (*Vec).V = cus_random(); +} +void randV16(V16 *Vec) // randomize Vector 16*1 +{ + // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + (*Vec).V = cus_random(); +} +void randV32(V32 *Vec) // randomize Vector 32*1 +{ + // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + (*Vec).V = cus_random(); +} +void randV64(V64 *Vec) // randomize Vector 64*1 +{ + uint32_t *v = (uint32_t *)&((*Vec).V); + *(v + 1) = cus_random(); + *v = cus_random(); +} +void randV128(V128 *Vec) // randomize Vector 128*1 +{ + uint32_t *v = (uint32_t *)&((*Vec).V[0]); + // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + *(v + 1) = cus_random(); + *v = cus_random(); + v = (uint32_t *)&((*Vec).V[1]); + *(v + 1) = cus_random(); + *v = cus_random(); +} +void randV256(V256 *Vec) // randomize Vector 256*1 +{ + uint32_t *v = (uint32_t *)&((*Vec).V[0]); + // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + *(v + 1) = cus_random(); + *v = cus_random(); + v = (uint32_t *)&((*Vec).V[1]); + *(v + 1) = cus_random(); + *v = cus_random(); + v = (uint32_t *)&((*Vec).V[2]); + *(v + 1) = cus_random(); + *v = cus_random(); + v = (uint32_t *)&((*Vec).V[3]); + *(v + 1) = cus_random(); + *v = cus_random(); +} +void printM4(M4 Mat) // printf Matrix 4*4 +{ + int i; + for (i = 0; i < 4; i++) + { + printf("0x%x\n", Mat.M[i]); + } +} +void printM8(M8 Mat) // printf Matrix 8*8 +{ + int i; + for (i = 0; i < 8; i++) + { + printf("0x%x\n", Mat.M[i]); + } +} +void printM16(M16 Mat) // printf Matrix 16*16 +{ + int i; + for (i = 0; i < 16; i++) + { + printf("0x%x\n", Mat.M[i]); + } +} +void 
printM32(M32 Mat) // printf Matrix 32*32 +{ + int i; + for (i = 0; i < 32; i++) + { + printf("0x%x\n", Mat.M[i]); + } +} +void printM64(M64 Mat) // printf Matrix 64*64 +{ + int i; + for (i = 0; i < 64; i++) + { + printf("0x%" PRIx64 "\n", Mat.M[i]); + } +} +void printM128(M128 Mat) // printf Matrix 128*128 +{ + int i; + for (i = 0; i < 128; i++) + { + printf("0x%" PRIx64 " ", Mat.M[i][0]); + printf("0x%" PRIx64 "\n", Mat.M[i][1]); + } +} +void printM256(M256 Mat) // printf Matrix 256*256 +{ + int i; + for (i = 0; i < 256; i++) + { + printf("0x%" PRIx64 " ", Mat.M[i][0]); + printf("0x%" PRIx64 " ", Mat.M[i][1]); + printf("0x%" PRIx64 " ", Mat.M[i][2]); + printf("0x%" PRIx64 "\n", Mat.M[i][3]); + } +} +void printV4(V4 Vec) // printf Vector 4*1 +{ + printf("0x%x\n", Vec.V); +} +void printV8(V8 Vec) // printf Vector 8*1 +{ + printf("0x%x\n", Vec.V); +} +void printV16(V16 Vec) // printf Vector 16*1 +{ + printf("0x%x\n", Vec.V); +} +void printV32(V32 Vec) // printf Vector 32*1 +{ + printf("0x%x\n", Vec.V); +} +void printV64(V64 Vec) // printf Vector 64*1 +{ + printf("0x%" PRIx64 "\n", Vec.V); +} +void printV128(V128 Vec) // printf Vector 128*1 +{ + printf("0x%" PRIx64 " ", Vec.V[0]); + printf("0x%" PRIx64 "\n", Vec.V[1]); +} +void printV256(V256 Vec) // printf Vector 256*1 +{ + printf("0x%" PRIx64 " ", Vec.V[0]); + printf("0x%" PRIx64 " ", Vec.V[1]); + printf("0x%" PRIx64 " ", Vec.V[2]); + printf("0x%" PRIx64 "\n", Vec.V[3]); +} +void copyM4(M4 Mat1, M4 *Mat2) +{ + int i; + for (i = 0; i < 4; i++) + { + (*Mat2).M[i] = Mat1.M[i]; + } +} +void copyM8(M8 Mat1, M8 *Mat2) +{ + int i; + for (i = 0; i < 8; i++) + { + (*Mat2).M[i] = Mat1.M[i]; + } +} +void copyM16(M16 Mat1, M16 *Mat2) +{ + int i; + for (i = 0; i < 16; i++) + { + (*Mat2).M[i] = Mat1.M[i]; + } +} +void copyM32(M32 Mat1, M32 *Mat2) +{ + int i; + for (i = 0; i < 32; i++) + { + (*Mat2).M[i] = Mat1.M[i]; + } +} +void copyM64(M64 Mat1, M64 *Mat2) +{ + int i; + for (i = 0; i < 64; i++) + { + (*Mat2).M[i] = Mat1.M[i]; + 
} +} +void copyM128(M128 Mat1, M128 *Mat2) +{ + int i; + for (i = 0; i < 128; i++) + { + (*Mat2).M[i][0] = Mat1.M[i][0]; + (*Mat2).M[i][1] = Mat1.M[i][1]; + } +} +void copyM256(M256 Mat1, M256 *Mat2) +{ + int i; + for (i = 0; i < 256; i++) + { + (*Mat2).M[i][0] = Mat1.M[i][0]; + (*Mat2).M[i][1] = Mat1.M[i][1]; + (*Mat2).M[i][2] = Mat1.M[i][2]; + (*Mat2).M[i][3] = Mat1.M[i][3]; + } +} +int isequalM4(M4 Mat1, M4 Mat2) +{ + int i; + for (i = 0; i < 4; i++) + { + if (Mat1.M[i] != Mat2.M[i]) + return 0; + } + return 1; +} +int isequalM8(M8 Mat1, M8 Mat2) +{ + int i; + for (i = 0; i < 8; i++) + { + if (Mat1.M[i] != Mat2.M[i]) + return 0; + } + return 1; +} +int isequalM16(M16 Mat1, M16 Mat2) +{ + int i; + for (i = 0; i < 16; i++) + { + if (Mat1.M[i] != Mat2.M[i]) + return 0; + } + return 1; +} +int isequalM32(M32 Mat1, M32 Mat2) +{ + int i; + for (i = 0; i < 32; i++) + { + if (Mat1.M[i] != Mat2.M[i]) + return 0; + } + return 1; +} +int isequalM64(M64 Mat1, M64 Mat2) +{ + int i; + for (i = 0; i < 64; i++) + { + if (Mat1.M[i] != Mat2.M[i]) + return 0; + } + return 1; +} +int isequalM128(M128 Mat1, M128 Mat2) +{ + int i; + for (i = 0; i < 128; i++) + { + if (Mat1.M[i][0] != Mat2.M[i][0]) + return 0; + if (Mat1.M[i][1] != Mat2.M[i][1]) + return 0; + } + return 1; +} +int isequalM256(M256 Mat1, M256 Mat2) +{ + int i; + for (i = 0; i < 256; i++) + { + if (Mat1.M[i][0] != Mat2.M[i][0]) + return 0; + if (Mat1.M[i][1] != Mat2.M[i][1]) + return 0; + if (Mat1.M[i][2] != Mat2.M[i][2]) + return 0; + if (Mat1.M[i][3] != Mat2.M[i][3]) + return 0; + } + return 1; +} +int isequalV4(V4 Vec1, V4 Vec2) +{ + if (Vec1.V != Vec2.V) + return 0; + return 1; +} +int isequalV8(V8 Vec1, V8 Vec2) +{ + if (Vec1.V != Vec2.V) + return 0; + return 1; +} +int isequalV16(V16 Vec1, V16 Vec2) +{ + if (Vec1.V != Vec2.V) + return 0; + return 1; +} +int isequalV32(V32 Vec1, V32 Vec2) +{ + if (Vec1.V != Vec2.V) + return 0; + return 1; +} +int isequalV64(V64 Vec1, V64 Vec2) +{ + if (Vec1.V != Vec2.V) + return 0; 
+ return 1; +} +int isequalV128(V128 Vec1, V128 Vec2) +{ + if (Vec1.V[0] != Vec2.V[0]) + return 0; + if (Vec1.V[1] != Vec2.V[1]) + return 0; + return 1; +} +int isequalV256(V256 Vec1, V256 Vec2) +{ + if (Vec1.V[0] != Vec2.V[0]) + return 0; + if (Vec1.V[1] != Vec2.V[1]) + return 0; + if (Vec1.V[2] != Vec2.V[2]) /* fix: was V[1] — copy-paste bug left words 2 and 3 unchecked */ + return 0; + if (Vec1.V[3] != Vec2.V[3]) /* fix: was V[1] */ + return 0; + return 1; +} +int readbitM4(M4 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-3 +{ + if ((Mat.M[i] & idM4[j]) == idM4[j]) + return 1; + else + return 0; +} +int readbitM8(M8 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-7 +{ + if ((Mat.M[i] & idM8[j]) == idM8[j]) + return 1; + else + return 0; +} +int readbitM16(M16 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-15 +{ + if ((Mat.M[i] & idM16[j]) == idM16[j]) + return 1; + else + return 0; +} +int readbitM32(M32 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-31 +{ + if ((Mat.M[i] & idM32[j]) == idM32[j]) + return 1; + else + return 0; +} +int readbitM64(M64 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-63 +{ + if ((Mat.M[i] & idM64[j]) == idM64[j]) + return 1; + else + return 0; +} +int readbitM128(M128 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-127 +{ + if (j < 64) + { + if ((Mat.M[i][0] & idM64[j]) == idM64[j]) + return 1; + else + return 0; + } + else + { + if ((Mat.M[i][1] & idM64[j - 64]) == idM64[j - 64]) + return 1; + else + return 0; + } +} +int readbitM256(M256 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-255 +{ + if (j < 64) + { + if ((Mat.M[i][0] & idM64[j]) == idM64[j]) + return 1; + else + return 0; + } + else if (j < 128) + { + if ((Mat.M[i][1] & idM64[j - 64]) == idM64[j - 64]) + return 1; + else + return 0; + } + else if (j < 192) + { + if ((Mat.M[i][2] & idM64[j - 128]) ==
idM64[j - 128]) + return 1; + else + return 0; + } + else + { + if ((Mat.M[i][3] & idM64[j - 192]) == idM64[j - 192]) + return 1; + else + return 0; + } +} +void flipbitM4(M4 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + (*Mat).M[i] ^= idM4[j]; +} +void flipbitM8(M8 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + (*Mat).M[i] ^= idM8[j]; +} +void flipbitM16(M16 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + (*Mat).M[i] ^= idM16[j]; +} +void flipbitM32(M32 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + (*Mat).M[i] ^= idM32[j]; +} +void flipbitM64(M64 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + (*Mat).M[i] ^= idM64[j]; +} +void flipbitM128(M128 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + if (j < 64) + { + (*Mat).M[i][0] ^= idM64[j]; + } + else + { + (*Mat).M[i][1] ^= idM64[j - 64]; + } +} +void flipbitM256(M256 *Mat, int i, int j) // flip (i, j) bit in a matrix +{ + if (j < 64) + { + (*Mat).M[i][0] ^= idM64[j]; + } + else if (j < 128) + { + (*Mat).M[i][1] ^= idM64[j - 64]; + } + else if (j < 192) + { + (*Mat).M[i][2] ^= idM64[j - 128]; + } + else + { + (*Mat).M[i][3] ^= idM64[j - 192]; + } +} +void setbitM4(M4 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM4(*Mat, i, j) == bit) + return; + else + flipbitM4(Mat, i, j); +} +void setbitM8(M8 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM8(*Mat, i, j) == bit) + return; + else + flipbitM8(Mat, i, j); +} +void setbitM16(M16 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM16(*Mat, i, j) == bit) + return; + else + flipbitM16(Mat, i, j); +} +void setbitM32(M32 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM32(*Mat, i, j) == bit) + return; + else + flipbitM32(Mat, i, j); +} +void setbitM64(M64 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM64(*Mat, i, j) == bit) + return; 
+ else + flipbitM64(Mat, i, j); +} +void setbitM128(M128 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM128(*Mat, i, j) == bit) + return; + else + flipbitM128(Mat, i, j); +} +void setbitM256(M256 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +{ + if (readbitM256(*Mat, i, j) == bit) + return; + else + flipbitM256(Mat, i, j); +} +int isinvertM4(M4 Mat) // Invertible Matrix? +{ + int i, j, k; + uint8_t temp; + int flag; + for (i = 0; i < 4; i++) + { + if ((Mat.M[i] & idM4[i]) == idM4[i]) + { + for (j = i + 1; j < 4; j++) + { + if ((Mat.M[j] & idM4[i]) == idM4[i]) + { + Mat.M[j] ^= Mat.M[i]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 4; j++) + { + if ((Mat.M[j] & idM4[i]) == idM4[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 4; k++) + { + if ((Mat.M[k] & idM4[i]) == idM4[i]) + { + Mat.M[k] ^= Mat.M[i]; + } + } + } + } + if (Mat.M[3] == idM4[3]) + return 1; + else + return 0; +} +int isinvertM8(M8 Mat) // Invertible Matrix? +{ + int i, j, k; + uint8_t temp; + int flag; + for (i = 0; i < 8; i++) + { + if ((Mat.M[i] & idM8[i]) == idM8[i]) + { + for (j = i + 1; j < 8; j++) + { + if ((Mat.M[j] & idM8[i]) == idM8[i]) + { + Mat.M[j] ^= Mat.M[i]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 8; j++) + { + if ((Mat.M[j] & idM8[i]) == idM8[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 8; k++) + { + if ((Mat.M[k] & idM8[i]) == idM8[i]) + { + Mat.M[k] ^= Mat.M[i]; + } + } + } + } + if (Mat.M[7] == idM8[7]) + return 1; + else + return 0; +} +int isinvertM16(M16 Mat) // Invertible Matrix? 
+{ + int i, j, k; + uint16_t temp; + int flag; + for (i = 0; i < 16; i++) + { + if ((Mat.M[i] & idM16[i]) == idM16[i]) + { + for (j = i + 1; j < 16; j++) + { + if ((Mat.M[j] & idM16[i]) == idM16[i]) + { + Mat.M[j] ^= Mat.M[i]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 16; j++) + { + if ((Mat.M[j] & idM16[i]) == idM16[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 16; k++) + { + if ((Mat.M[k] & idM16[i]) == idM16[i]) + { + Mat.M[k] ^= Mat.M[i]; + } + } + } + } + if (Mat.M[15] == idM16[15]) + return 1; + else + return 0; +} +int isinvertM32(M32 Mat) // Invertible Matrix? +{ + int i, j, k; + uint32_t temp; + int flag; + for (i = 0; i < 32; i++) + { + if ((Mat.M[i] & idM32[i]) == idM32[i]) + { + for (j = i + 1; j < 32; j++) + { + if ((Mat.M[j] & idM32[i]) == idM32[i]) + { + Mat.M[j] ^= Mat.M[i]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 32; j++) + { + if ((Mat.M[j] & idM32[i]) == idM32[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 32; k++) + { + if ((Mat.M[k] & idM32[i]) == idM32[i]) + { + Mat.M[k] ^= Mat.M[i]; + } + } + } + } + if (Mat.M[31] == idM32[31]) + return 1; + else + return 0; +} +int isinvertM64(M64 Mat) // Invertible Matrix? 
+{ + int i, j, k; + uint64_t temp; + int flag; + for (i = 0; i < 64; i++) + { + if ((Mat.M[i] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 64; j++) + { + if ((Mat.M[j] & idM64[i]) == idM64[i]) + { + Mat.M[j] ^= Mat.M[i]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 64; j++) + { + if ((Mat.M[j] & idM64[i]) == idM64[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 64; k++) + { + if ((Mat.M[k] & idM64[i]) == idM64[i]) + { + Mat.M[k] ^= Mat.M[i]; + } + } + } + } + if (Mat.M[63] == idM64[63]) + return 1; + else + return 0; +} +int isinvertM128(M128 Mat) // Invertible Matrix? +{ + int i, j, k; + uint64_t temp; + int flag; + for (i = 0; i < 64; i++) + { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + Mat.M[j][0] ^= Mat.M[i][0]; + Mat.M[j][1] ^= Mat.M[i][1]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + temp = Mat.M[i][0]; + Mat.M[i][0] = Mat.M[j][0]; + Mat.M[j][0] = temp; + + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 128; k++) + { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) + { + Mat.M[k][0] ^= Mat.M[i][0]; + Mat.M[k][1] ^= Mat.M[i][1]; + } + } + } + } + for (i = 64; i < 128; i++) + { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) + { + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[j][1] ^= Mat.M[i][1]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 128; k++) + { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) 
+ { + Mat.M[k][1] ^= Mat.M[i][1]; + } + } + } + } + if (Mat.M[127][1] == idM64[63]) + return 1; + else + return 0; +} +int isinvertM256(M256 Mat) // Invertible Matrix? +{ + int i, j, k; + uint64_t temp; + int flag; + for (i = 0; i < 64; i++) + { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + Mat.M[j][0] ^= Mat.M[i][0]; + Mat.M[j][1] ^= Mat.M[i][1]; + Mat.M[j][2] ^= Mat.M[i][2]; + Mat.M[j][3] ^= Mat.M[i][3]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + temp = Mat.M[i][0]; + Mat.M[i][0] = Mat.M[j][0]; + Mat.M[j][0] = temp; + + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + + temp = Mat.M[i][2]; + Mat.M[i][2] = Mat.M[j][2]; + Mat.M[j][2] = temp; + + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) + { + Mat.M[k][0] ^= Mat.M[i][0]; + Mat.M[k][1] ^= Mat.M[i][1]; + Mat.M[k][2] ^= Mat.M[i][2]; + Mat.M[k][3] ^= Mat.M[i][3]; + } + } + } + } + for (i = 64; i < 128; i++) + { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[j][1] ^= Mat.M[i][1]; + Mat.M[j][2] ^= Mat.M[i][2]; + Mat.M[j][3] ^= Mat.M[i][3]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + + temp = Mat.M[i][2]; + Mat.M[i][2] = Mat.M[j][2]; + Mat.M[j][2] = temp; + + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[k][1] ^= Mat.M[i][1]; + 
Mat.M[k][2] ^= Mat.M[i][2]; + Mat.M[k][3] ^= Mat.M[i][3]; + } + } + } + } + for (i = 128; i < 192; i++) + { + if ((Mat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + Mat.M[j][2] ^= Mat.M[i][2]; + Mat.M[j][3] ^= Mat.M[i][3]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + temp = Mat.M[i][2]; + Mat.M[i][2] = Mat.M[j][2]; + Mat.M[j][2] = temp; + + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) + { + Mat.M[k][2] ^= Mat.M[i][2]; + Mat.M[k][3] ^= Mat.M[i][3]; + } + } + } + } + for (i = 192; i < 256; i++) + { + if ((Mat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + Mat.M[j][3] ^= Mat.M[i][3]; + } + } + } + else + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + flag = 0; + break; + } + } + if (flag) + return 0; + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) + { + Mat.M[k][3] ^= Mat.M[i][3]; + } + } + } + } + if (Mat.M[255][3] == idM64[63]) + return 1; + else + return 0; +} +void invsM4(M4 Mat, M4 *Mat_inv) // compute the 4*4 inverse matrix +{ + int i, j, k; + uint8_t temp; + identityM4(Mat_inv); + for (i = 0; i < 4; i++) + { + if ((Mat.M[i] & idM4[i]) == idM4[i]) + { + for (j = i + 1; j < 4; j++) + { + if ((Mat.M[j] & idM4[i]) == idM4[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + else + { + for (j = i + 1; j < 4; j++) + { + if ((Mat.M[j] & idM4[i]) == idM4[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + 
Mat.M[j] = temp; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + break; + } + } + for (k = i + 1; k < 4; k++) + { + if ((Mat.M[k] & idM4[i]) == idM4[i]) + { + Mat.M[k] ^= Mat.M[i]; + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + } + } + } + } + for (i = 3; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j] & idM4[i]) == idM4[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } +} +void invsM8(M8 Mat, M8 *Mat_inv) // compute the 8*8 inverse matrix +{ + int i, j, k; + uint8_t temp; + identityM8(Mat_inv); + for (i = 0; i < 8; i++) + { + if ((Mat.M[i] & idM8[i]) == idM8[i]) + { + for (j = i + 1; j < 8; j++) + { + if ((Mat.M[j] & idM8[i]) == idM8[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + else + { + for (j = i + 1; j < 8; j++) + { + if ((Mat.M[j] & idM8[i]) == idM8[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + break; + } + } + for (k = i + 1; k < 8; k++) + { + if ((Mat.M[k] & idM8[i]) == idM8[i]) + { + Mat.M[k] ^= Mat.M[i]; + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + } + } + } + } + for (i = 7; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j] & idM8[i]) == idM8[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } +} +void invsM16(M16 Mat, M16 *Mat_inv) // compute the 16*16 inverse matrix +{ + int i, j, k; + uint16_t temp; + identityM16(Mat_inv); + for (i = 0; i < 16; i++) + { + if ((Mat.M[i] & idM16[i]) == idM16[i]) + { + for (j = i + 1; j < 16; j++) + { + if ((Mat.M[j] & idM16[i]) == idM16[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + else + { + for (j = i + 1; j < 16; j++) + { + if ((Mat.M[j] & idM16[i]) == idM16[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + 
(*Mat_inv).M[j] = temp; + break; + } + } + for (k = i + 1; k < 16; k++) + { + if ((Mat.M[k] & idM16[i]) == idM16[i]) + { + Mat.M[k] ^= Mat.M[i]; + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + } + } + } + } + for (i = 15; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j] & idM16[i]) == idM16[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } +} +void invsM32(M32 Mat, M32 *Mat_inv) // compute the 32*32 inverse matrix +{ + int i, j, k; + uint32_t temp; + identityM32(Mat_inv); + for (i = 0; i < 32; i++) + { + if ((Mat.M[i] & idM32[i]) == idM32[i]) + { + for (j = i + 1; j < 32; j++) + { + if ((Mat.M[j] & idM32[i]) == idM32[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + else + { + for (j = i + 1; j < 32; j++) + { + if ((Mat.M[j] & idM32[i]) == idM32[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + break; + } + } + for (k = i + 1; k < 32; k++) + { + if ((Mat.M[k] & idM32[i]) == idM32[i]) + { + Mat.M[k] ^= Mat.M[i]; + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + } + } + } + } + for (i = 31; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j] & idM32[i]) == idM32[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } +} +void invsM64(M64 Mat, M64 *Mat_inv) // compute the 64*64 inverse matrix +{ + int i, j, k; + uint64_t temp; + identityM64(Mat_inv); + for (i = 0; i < 64; i++) + { + if ((Mat.M[i] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 64; j++) + { + if ((Mat.M[j] & idM64[i]) == idM64[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + else + { + for (j = i + 1; j < 64; j++) + { + if ((Mat.M[j] & idM64[i]) == idM64[i]) + { + temp = Mat.M[i]; + Mat.M[i] = Mat.M[j]; + Mat.M[j] = temp; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + break; + } + } + for (k = i + 1; k 
< 64; k++) + { + if ((Mat.M[k] & idM64[i]) == idM64[i]) + { + Mat.M[k] ^= Mat.M[i]; + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + } + } + } + } + for (i = 63; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j] & idM64[i]) == idM64[i]) + { + Mat.M[j] ^= Mat.M[i]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } +} +void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix +{ + int i, j, k; + uint64_t temp; + identityM128(Mat_inv); + for (i = 0; i < 64; i++) + { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + Mat.M[j][0] ^= Mat.M[i][0]; + Mat.M[j][1] ^= Mat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + } + } + } + else + { + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + temp = Mat.M[i][0]; + Mat.M[i][0] = Mat.M[j][0]; + Mat.M[j][0] = temp; + + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + break; + } + } + for (k = i + 1; k < 128; k++) + { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) + { + Mat.M[k][0] ^= Mat.M[i][0]; + Mat.M[k][1] ^= Mat.M[i][1]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + } + } + } + } + for (i = 64; i < 128; i++) + { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) + { + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[j][1] ^= Mat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + } + } + } + else + { + for (j = i + 1; j < 128; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = 
temp; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + break; + } + } + for (k = i + 1; k < 128; k++) + { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[k][1] ^= Mat.M[i][1]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + } + } + } + } + for (i = 127; i >= 64; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[j][1] ^= Mat.M[i][1]; + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + } + } + } + for (i = 63; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + Mat.M[j][0] ^= Mat.M[i][0]; + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + } + } + } +} +void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix +{ + int i, j, k; + uint64_t temp; + identityM256(Mat_inv); + for (i = 0; i < 64; i++) // diagonal = 1? 
+ { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + Mat.M[j][0] ^= Mat.M[i][0]; + Mat.M[j][1] ^= Mat.M[i][1]; + Mat.M[j][2] ^= Mat.M[i][2]; + Mat.M[j][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + else // swap to find 1 + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + temp = Mat.M[i][0]; + Mat.M[i][0] = Mat.M[j][0]; + Mat.M[j][0] = temp; + + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + + temp = Mat.M[i][2]; + Mat.M[i][2] = Mat.M[j][2]; + Mat.M[j][2] = temp; + + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + break; + } + } + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) + { + Mat.M[k][0] ^= Mat.M[i][0]; + Mat.M[k][1] ^= Mat.M[i][1]; + Mat.M[k][2] ^= Mat.M[i][2]; + Mat.M[k][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + } + } + } + } + for (i = 64; i < 128; i++) // diagonal = 1? 
+ { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[j][1] ^= Mat.M[i][1]; + Mat.M[j][2] ^= Mat.M[i][2]; + Mat.M[j][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + else // swap to find 1 + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + temp = Mat.M[i][1]; + Mat.M[i][1] = Mat.M[j][1]; + Mat.M[j][1] = temp; + + temp = Mat.M[i][2]; + Mat.M[i][2] = Mat.M[j][2]; + Mat.M[j][2] = temp; + + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + break; + } + } + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[k][1] ^= Mat.M[i][1]; + Mat.M[k][2] ^= Mat.M[i][2]; + Mat.M[k][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + } + } + } + } + for (i = 128; i < 192; i++) // diagonal = 1? 
+ { + if ((Mat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + Mat.M[j][2] ^= Mat.M[i][2]; + Mat.M[j][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + else // swap to find 1 + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + temp = Mat.M[i][2]; + Mat.M[i][2] = Mat.M[j][2]; + Mat.M[j][2] = temp; + + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + break; + } + } + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) + { + Mat.M[k][2] ^= Mat.M[i][2]; + Mat.M[k][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + } + } + } + } + for (i = 192; i < 256; i++) // diagonal = 1? 
+ { + if ((Mat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + Mat.M[j][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + else // swap to find 1 + { + for (j = i + 1; j < 256; j++) + { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + temp = Mat.M[i][3]; + Mat.M[i][3] = Mat.M[j][3]; + Mat.M[j][3] = temp; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + break; + } + } + for (k = i + 1; k < 256; k++) + { + if ((Mat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) + { + Mat.M[k][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + } + } + } + } + for (i = 255; i >= 192; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + Mat.M[j][3] ^= Mat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + for (i = 191; i >= 128; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + Mat.M[j][2] ^= Mat.M[i][2]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= 
(*Mat_inv).M[i][3]; + } + } + } + for (i = 127; i >= 64; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + Mat.M[j][1] ^= Mat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + for (i = 63; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) + { + Mat.M[j][0] ^= Mat.M[i][0]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } +} +uint8_t affineU4(Aff4 aff, uint8_t arr) // 4bits affine transformation +{ + V4 mul_vec, ans_vec; + mul_vec.V = arr; + MatMulVecM4(aff.Mat, mul_vec, &ans_vec); // mul + return ans_vec.V ^ aff.Vec.V; // add +} +uint8_t affineU8(Aff8 aff, uint8_t arr) // 8bits affine transformation +{ + V8 mul_vec, ans_vec; + mul_vec.V = arr; + MatMulVecM8(aff.Mat, mul_vec, &ans_vec); // mul + return ans_vec.V ^ aff.Vec.V; // add +} +uint16_t affineU16(Aff16 aff, uint16_t arr) // 16bits affine transformation +{ + V16 mul_vec, ans_vec; + mul_vec.V = arr; + MatMulVecM16(aff.Mat, mul_vec, &ans_vec); // mul + return ans_vec.V ^ aff.Vec.V; // add +} +uint32_t affineU32(Aff32 aff, uint32_t arr) // 32bits affine transformation +{ + V32 mul_vec, ans_vec; + mul_vec.V = arr; + MatMulVecM32(aff.Mat, mul_vec, &ans_vec); // mul + return ans_vec.V ^ aff.Vec.V; // add +} +uint64_t affineU64(Aff64 aff, uint64_t arr) // 64bits affine transformation +{ + V64 mul_vec, ans_vec; + mul_vec.V = arr; + MatMulVecM64(aff.Mat, mul_vec, &ans_vec); // mul + return ans_vec.V ^ aff.Vec.V; // add +} +void affineU128(Aff128 aff, uint64_t arr[], uint64_t ans[]) // 128bits affine transformation +{ + V128 mul_vec, ans_vec; + mul_vec.V[0] = arr[0]; + mul_vec.V[1] = arr[1]; + MatMulVecM128(aff.Mat, mul_vec, &ans_vec); 
// mul + ans[0] = ans_vec.V[0] ^ aff.Vec.V[0]; // add + ans[1] = ans_vec.V[1] ^ aff.Vec.V[1]; +} +int xorU4(uint8_t n) // 4bits internal xor +{ + if (xor[n]) + return 1; + else + return 0; +} +int xorU8(uint8_t n) // uint8_t internal xor +{ + if (xor[n]) + return 1; + else + return 0; +} +int xorU16(uint16_t n) // uint16_t internal xor +{ + uint8_t temp = 0; + uint8_t *u = (uint8_t *)&n; + temp = (*u) ^ (*(u + 1)); + if (xorU8(temp)) + return 1; + else + return 0; +} +int xorU32(uint32_t n) // uint32_t internal xor +{ + uint16_t temp = 0; + uint16_t *u = (uint16_t *)&n; + temp = (*u) ^ (*(u + 1)); + if (xorU16(temp)) + return 1; + else + return 0; +} +int xorU64(uint64_t n) // uint64_t internal xor +{ + uint32_t temp = 0; + uint32_t *u = (uint32_t *)&n; + temp = (*u) ^ (*(u + 1)); + if (xorU32(temp)) + return 1; + else + return 0; +} +int xorU128(uint64_t n[]) // uint128_t internal xor +{ + uint64_t temp = 0; + temp = n[0] ^ n[1]; + if (xorU64(temp)) + return 1; + else + return 0; +} +int xorU256(uint64_t n[]) // uint256_t internal xor +{ + uint64_t temp = 0; + temp = n[0] ^ n[1] ^ n[2] ^ n[3]; + if (xorU64(temp)) + return 1; + else + return 0; +} +int HWU4(uint8_t n) // 4bits HW +{ + return HW[n]; +} +int HWU8(uint8_t n) // uint8_t HW +{ + return HW[n]; +} +int HWU16(uint16_t n) // uint16_t HW +{ + uint8_t *u = (uint8_t *)&n; + return HWU8(*u) + HWU8(*(u + 1)); +} +int HWU32(uint32_t n) // uint32_t HW +{ + uint16_t *u = (uint16_t *)&n; + return HWU16(*u) + HWU16(*(u + 1)); +} +int HWU64(uint64_t n) // uint64_t HW +{ + uint32_t *u = (uint32_t *)&n; + return HWU32(*u) + HWU32(*(u + 1)); +} +int HWU128(uint64_t n[]) // uint128_t HW +{ + return HWU64(n[0]) + HWU64(n[1]); +} +void printU8(uint8_t n) // printf uint8_t +{ + printf("0x%x\n", n); +} +void printU16(uint16_t n) // printf uint16_t +{ + printf("0x%x\n", n); +} +void printU32(uint32_t n) // printf uint32_t +{ + printf("0x%x\n", n); +} +void printU64(uint64_t n) // printf uint64_t +{ + printf("0x%" PRIx64 "\n", 
n); +} +void printU128(uint64_t n[]) // printf uint128_t +{ + printf("0x%" PRIx64 " ", n[0]); + printf("0x%" PRIx64 "\n", n[1]); +} +void printbitM4(M4 Mat) // printf Matrix 4*4 in the form of bits +{ + int i, j; + uint8_t temp; + for (i = 0; i < 4; i++) + { + temp = Mat.M[i]; + for (j = 0; j < 4; j++) + { + if (temp & 0x08) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + printf("\n"); + } + printf("\n"); +} +void printbitM8(M8 Mat) // printf Matrix 8*8 in the form of bits +{ + int i, j; + uint8_t temp; + for (i = 0; i < 8; i++) + { + temp = Mat.M[i]; + for (j = 0; j < 8; j++) + { + if (temp & 0x80) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + printf("\n"); + } + printf("\n"); +} +void printbitM16(M16 Mat) // printf Matrix 16*16 in the form of bits +{ + int i, j; + uint16_t temp; + for (i = 0; i < 16; i++) + { + temp = Mat.M[i]; + for (j = 0; j < 16; j++) + { + if (temp & 0x8000) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + printf("\n"); + } + printf("\n"); +} +void printbitM32(M32 Mat) // printf Matrix 32*32 in the form of bits +{ + int i, j; + uint32_t temp; + for (i = 0; i < 32; i++) + { + temp = Mat.M[i]; + for (j = 0; j < 32; j++) + { + if (temp & 0x80000000) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + printf("\n"); + } + printf("\n"); +} +void printbitM64(M64 Mat) // printf Matrix 64*64 in the form of bits +{ + int i, j; + uint64_t temp; + for (i = 0; i < 64; i++) + { + temp = Mat.M[i]; + for (j = 0; j < 64; j++) + { + if (temp & 0x8000000000000000) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + printf("\n"); + } + printf("\n"); +} +void printbitM128(M128 Mat) // printf Matrix 128*128 in the form of bits +{ + int i, j; + uint64_t temp; + for (i = 0; i < 128; i++) + { + temp = Mat.M[i][0]; + for (j = 0; j < 64; j++) + { + if (temp & 0x8000000000000000) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + 
temp = Mat.M[i][1]; + for (j = 0; j < 64; j++) + { + if (temp & 0x8000000000000000) + printf("%d ", 1); + else + printf("%d ", 0); + temp = temp << 1; + } + printf("\n"); + } + printf("\n"); +} +void VecAddVecV4(V4 Vec1, V4 Vec2, V4 *Vec) +{ + (*Vec).V = Vec1.V ^ Vec2.V; +} +void VecAddVecV8(V8 Vec1, V8 Vec2, V8 *Vec) +{ + (*Vec).V = Vec1.V ^ Vec2.V; +} +void VecAddVecV16(V16 Vec1, V16 Vec2, V16 *Vec) +{ + (*Vec).V = Vec1.V ^ Vec2.V; +} +void VecAddVecV32(V32 Vec1, V32 Vec2, V32 *Vec) +{ + (*Vec).V = Vec1.V ^ Vec2.V; +} +void VecAddVecV64(V64 Vec1, V64 Vec2, V64 *Vec) +{ + (*Vec).V = Vec1.V ^ Vec2.V; +} +void VecAddVecV128(V128 Vec1, V128 Vec2, V128 *Vec) +{ + (*Vec).V[0] = Vec1.V[0] ^ Vec2.V[0]; + (*Vec).V[1] = Vec1.V[1] ^ Vec2.V[1]; +} +void VecAddVecV256(V256 Vec1, V256 Vec2, V256 *Vec) +{ + (*Vec).V[0] = Vec1.V[0] ^ Vec2.V[0]; + (*Vec).V[1] = Vec1.V[1] ^ Vec2.V[1]; + (*Vec).V[2] = Vec1.V[2] ^ Vec2.V[2]; + (*Vec).V[3] = Vec1.V[3] ^ Vec2.V[3]; +} +uint8_t MatMulNumM4(M4 Mat, uint8_t n) // matrix * number -> number 4bits +{ + int i; + uint8_t temp = 0; + for (i = 0; i < 4; i++) + { + if (xorU4(Mat.M[i] & n & 0x0f)) + temp ^= idM4[i]; + } + return temp; +} +uint8_t MatMulNumM8(M8 Mat, uint8_t n) // matrix * number -> number 8bits +{ + int i; + uint8_t temp = 0; + for (i = 0; i < 8; i++) + { + if (xorU8(Mat.M[i] & n)) + temp ^= idM8[i]; + } + return temp; +} +uint16_t MatMulNumM16(M16 Mat, uint16_t n) // matrix * number -> number 16bits +{ + int i; + uint16_t temp = 0; + for (i = 0; i < 16; i++) + { + if (xorU16(Mat.M[i] & n)) + temp ^= idM16[i]; + } + return temp; +} +uint32_t MatMulNumM32(M32 Mat, uint32_t n) // matrix * number -> number 32bits +{ + int i; + uint32_t temp = 0; + for (i = 0; i < 32; i++) + { + if (xorU32(Mat.M[i] & n)) + temp ^= idM32[i]; + } + return temp; +} +uint64_t MatMulNumM64(M64 Mat, uint64_t n) // matrix * number -> number 64bits +{ + int i; + uint64_t temp = 0; + for (i = 0; i < 64; i++) + { + if (xorU64(Mat.M[i] & n)) + temp ^= idM64[i]; 
+ } + return temp; +} +void MatMulVecM4(M4 Mat, V4 Vec, V4 *ans) // matrix * vector -> vector 4*1 +{ + int i; + initV4(ans); + for (i = 0; i < 4; i++) + { + if (xorU4(Mat.M[i] & Vec.V & 0x0f)) + (*ans).V ^= idM4[i]; + } +} +void MatMulVecM8(M8 Mat, V8 Vec, V8 *ans) // matrix * vector -> vector 8*1 +{ + int i; + initV8(ans); + for (i = 0; i < 8; i++) + { + if (xorU8(Mat.M[i] & Vec.V)) + (*ans).V ^= idM8[i]; + } +} +void MatMulVecM16(M16 Mat, V16 Vec, V16 *ans) // matrix * vector -> vector 16*1 +{ + int i; + initV16(ans); + for (i = 0; i < 16; i++) + { + if (xorU16(Mat.M[i] & Vec.V)) + (*ans).V ^= idM16[i]; + } +} +void MatMulVecM32(M32 Mat, V32 Vec, V32 *ans) // matrix * vector -> vector 32*1 +{ + int i; + initV32(ans); + for (i = 0; i < 32; i++) + { + if (xorU32(Mat.M[i] & Vec.V)) + (*ans).V ^= idM32[i]; + } +} +void MatMulVecM64(M64 Mat, V64 Vec, V64 *ans) // matrix * vector -> vector 64*1 +{ + int i; + initV64(ans); + for (i = 0; i < 64; i++) + { + if (xorU64(Mat.M[i] & Vec.V)) + (*ans).V ^= idM64[i]; + } +} +void MatMulVecM128(M128 Mat, V128 Vec, V128 *ans) // matrix * vector -> vector 128*1 +{ + int i; + initV128(ans); + uint64_t temp[2]; + for (i = 0; i < 64; i++) + { + temp[0] = Mat.M[i][0] & Vec.V[0]; + temp[1] = Mat.M[i][1] & Vec.V[1]; + if (xorU128(temp)) + (*ans).V[0] ^= idM64[i]; + } + for (i = 64; i < 128; i++) + { + temp[0] = Mat.M[i][0] & Vec.V[0]; + temp[1] = Mat.M[i][1] & Vec.V[1]; + if (xorU128(temp)) + (*ans).V[1] ^= idM64[i - 64]; + } +} +void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) // matrix * vector -> vector 256*1 +{ + int i; + initV256(ans); + uint64_t temp[4]; + for (i = 0; i < 64; i++) + { + temp[0] = Mat.M[i][0] & Vec.V[0]; + temp[1] = Mat.M[i][1] & Vec.V[1]; + temp[2] = Mat.M[i][2] & Vec.V[2]; + temp[3] = Mat.M[i][3] & Vec.V[3]; + if (xorU256(temp)) + (*ans).V[0] ^= idM64[i]; + } + for (i = 64; i < 128; i++) + { + temp[0] = Mat.M[i][0] & Vec.V[0]; + temp[1] = Mat.M[i][1] & Vec.V[1]; + temp[2] = Mat.M[i][2] & Vec.V[2]; + temp[3] = 
Mat.M[i][3] & Vec.V[3]; + if (xorU256(temp)) + (*ans).V[1] ^= idM64[i - 64]; + } + for (i = 128; i < 192; i++) + { + temp[0] = Mat.M[i][0] & Vec.V[0]; + temp[1] = Mat.M[i][1] & Vec.V[1]; + temp[2] = Mat.M[i][2] & Vec.V[2]; + temp[3] = Mat.M[i][3] & Vec.V[3]; + if (xorU256(temp)) + (*ans).V[2] ^= idM64[i - 128]; + } + for (i = 192; i < 256; i++) + { + temp[0] = Mat.M[i][0] & Vec.V[0]; + temp[1] = Mat.M[i][1] & Vec.V[1]; + temp[2] = Mat.M[i][2] & Vec.V[2]; + temp[3] = Mat.M[i][3] & Vec.V[3]; + if (xorU256(temp)) + (*ans).V[3] ^= idM64[i - 192]; + } +} +void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M4 tempMat; + M4 resultMat; + uint8_t temp; + uint8_t trail[16][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM4(Mat); + identityM4(Mat_inv); + randM4(&tempMat); + copyM4(tempMat, &resultMat); + for (i = 0; i < 4; i++) // diagonal = 1? 
+ { + if ((tempMat.M[i] & idM4[i]) == idM4[i]) + { + for (j = i + 1; j < 4; j++) + { + if ((tempMat.M[j] & idM4[i]) == idM4[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 4; j++) + { + if ((tempMat.M[j] & idM4[i]) == idM4[i]) + { + temp = tempMat.M[i]; + tempMat.M[i] = tempMat.M[j]; + tempMat.M[j] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 3) + { + p = i + 1 + cus_random() % (3 - i); // swap + temp = tempMat.M[p]; + tempMat.M[p] = tempMat.M[i]; + tempMat.M[i] = temp; + temp = (*Mat_inv).M[p]; + (*Mat_inv).M[p] = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = temp; + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + for (t = i + 1; t < 4; t++) + { + if (cus_random() % 2) + { + tempMat.M[t] ^= tempMat.M[i]; + (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 4; k++) + { + if ((tempMat.M[k] & idM4[i]) == idM4[i]) + { + tempMat.M[k] ^= tempMat.M[i]; + + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 3; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM4[t]) == idM4[t]) + { + tempMat.M[j] ^= tempMat.M[t]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse matrix + { + if 
(trail[j][0]) // add + { + (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]]; + (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; + (*Mat).M[trail[j][2]] = temp; + } + } + } + else // invertible + { + for (i = 3; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM4[i]) == idM4[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + copyM4(resultMat, Mat); + } +} +void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M8 tempMat; + M8 resultMat; + uint8_t temp; + uint8_t trail[64][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM8(Mat); + identityM8(Mat_inv); + randM8(&tempMat); + copyM8(tempMat, &resultMat); + for (i = 0; i < 8; i++) // diagonal = 1? + { + if ((tempMat.M[i] & idM8[i]) == idM8[i]) + { + for (j = i + 1; j < 8; j++) + { + if ((tempMat.M[j] & idM8[i]) == idM8[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 8; j++) + { + if ((tempMat.M[j] & idM8[i]) == idM8[i]) + { + temp = tempMat.M[i]; + tempMat.M[i] = tempMat.M[j]; + tempMat.M[j] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 7) + { + p = i + 1 + cus_random() % (7 - i); // swap + temp = tempMat.M[p]; + tempMat.M[p] = tempMat.M[i]; + tempMat.M[i] = temp; + temp = (*Mat_inv).M[p]; + (*Mat_inv).M[p] = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = temp; + trail[times][0] = 0; + trail[times][1] = p; 
+ trail[times][2] = i; + times++; + for (t = i + 1; t < 8; t++) + { + if (cus_random() % 2) + { + tempMat.M[t] ^= tempMat.M[i]; + (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 8; k++) + { + if ((tempMat.M[k] & idM8[i]) == idM8[i]) + { + tempMat.M[k] ^= tempMat.M[i]; + + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 7; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM8[t]) == idM8[t]) + { + tempMat.M[j] ^= tempMat.M[t]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse matrix + { + if (trail[j][0]) // add + { + (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]]; + (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; + (*Mat).M[trail[j][2]] = temp; + } + } + } + else // invertible + { + for (i = 7; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM8[i]) == idM8[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + copyM8(resultMat, Mat); + } +} +void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M16 tempMat; + M16 resultMat; + uint16_t temp; + uint8_t trail[256][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM16(Mat); + identityM16(Mat_inv); + randM16(&tempMat); + copyM16(tempMat, &resultMat); + for (i = 0; i < 16; i++) // diagonal = 1? 
+ { + if ((tempMat.M[i] & idM16[i]) == idM16[i]) + { + for (j = i + 1; j < 16; j++) + { + if ((tempMat.M[j] & idM16[i]) == idM16[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 16; j++) + { + if ((tempMat.M[j] & idM16[i]) == idM16[i]) + { + temp = tempMat.M[i]; + tempMat.M[i] = tempMat.M[j]; + tempMat.M[j] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 15) + { + p = i + 1 + cus_random() % (15 - i); // swap + temp = tempMat.M[p]; + tempMat.M[p] = tempMat.M[i]; + tempMat.M[i] = temp; + temp = (*Mat_inv).M[p]; + (*Mat_inv).M[p] = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = temp; + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + for (t = i + 1; t < 16; t++) + { + if (cus_random() % 2) + { + tempMat.M[t] ^= tempMat.M[i]; + (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 16; k++) + { + if ((tempMat.M[k] & idM16[i]) == idM16[i]) + { + tempMat.M[k] ^= tempMat.M[i]; + + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 15; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM16[t]) == idM16[t]) + { + tempMat.M[j] ^= tempMat.M[t]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse 
matrix + { + if (trail[j][0]) // add + { + (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]]; + (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; + (*Mat).M[trail[j][2]] = temp; + } + } + } + else // invertible + { + for (i = 15; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM16[i]) == idM16[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + copyM16(resultMat, Mat); + } +} +void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M32 tempMat; + M32 resultMat; + uint32_t temp; + uint8_t trail[1024][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM32(Mat); + identityM32(Mat_inv); + randM32(&tempMat); + copyM32(tempMat, &resultMat); + for (i = 0; i < 32; i++) // diagonal = 1? + { + if ((tempMat.M[i] & idM32[i]) == idM32[i]) + { + for (j = i + 1; j < 32; j++) + { + if ((tempMat.M[j] & idM32[i]) == idM32[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 32; j++) + { + if ((tempMat.M[j] & idM32[i]) == idM32[i]) + { + temp = tempMat.M[i]; + tempMat.M[i] = tempMat.M[j]; + tempMat.M[j] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 31) + { + p = i + 1 + cus_random() % (31 - i); // swap + temp = tempMat.M[p]; + tempMat.M[p] = tempMat.M[i]; + tempMat.M[i] = temp; + temp = (*Mat_inv).M[p]; + (*Mat_inv).M[p] = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = temp; 
+ trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + for (t = i + 1; t < 32; t++) + { + if (cus_random() % 2) + { + tempMat.M[t] ^= tempMat.M[i]; + (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 32; k++) + { + if ((tempMat.M[k] & idM32[i]) == idM32[i]) + { + tempMat.M[k] ^= tempMat.M[i]; + + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 31; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM32[t]) == idM32[t]) + { + tempMat.M[j] ^= tempMat.M[t]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse matrix + { + if (trail[j][0]) // add + { + (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]]; + (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; + (*Mat).M[trail[j][2]] = temp; + } + } + } + else // invertible + { + for (i = 31; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM32[i]) == idM32[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + copyM32(resultMat, Mat); + } +} +void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M64 tempMat; + M64 resultMat; + uint64_t temp; + uint8_t trail[4096][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM64(Mat); + identityM64(Mat_inv); + randM64(&tempMat); + copyM64(tempMat, &resultMat); + for (i = 0; i < 64; i++) // diagonal = 1? 
+ { + if ((tempMat.M[i] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 64; j++) + { + if ((tempMat.M[j] & idM64[i]) == idM64[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 64; j++) + { + if ((tempMat.M[j] & idM64[i]) == idM64[i]) + { + temp = tempMat.M[i]; + tempMat.M[i] = tempMat.M[j]; + tempMat.M[j] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = (*Mat_inv).M[j]; + (*Mat_inv).M[j] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 63) + { + p = i + 1 + cus_random() % (63 - i); // swap + temp = tempMat.M[p]; + tempMat.M[p] = tempMat.M[i]; + tempMat.M[i] = temp; + temp = (*Mat_inv).M[p]; + (*Mat_inv).M[p] = (*Mat_inv).M[i]; + (*Mat_inv).M[i] = temp; + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + for (t = i + 1; t < 64; t++) + { + if (cus_random() % 2) + { + tempMat.M[t] ^= tempMat.M[i]; + (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 64; k++) + { + if ((tempMat.M[k] & idM64[i]) == idM64[i]) + { + tempMat.M[k] ^= tempMat.M[i]; + + (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 63; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM64[t]) == idM64[t]) + { + tempMat.M[j] ^= tempMat.M[t]; + (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse 
matrix + { + if (trail[j][0]) // add + { + (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]]; + (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; + (*Mat).M[trail[j][2]] = temp; + } + } + } + else // invertible + { + for (i = 63; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j] & idM64[i]) == idM64[i]) + { + tempMat.M[j] ^= tempMat.M[i]; + + (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; + } + } + } + copyM64(resultMat, Mat); + } +} +void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M128 tempMat; + M128 resultMat; + uint64_t temp; + uint8_t trail[16384][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM128(Mat); + identityM128(Mat_inv); + randM128(&tempMat); + copyM128(tempMat, &resultMat); + for (i = 0; i < 64; i++) // diagonal = 1? + { + if ((tempMat.M[i][0] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 128; j++) + { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) + { + tempMat.M[j][0] ^= tempMat.M[i][0]; + tempMat.M[j][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 128; j++) + { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) + { + temp = tempMat.M[i][0]; + tempMat.M[i][0] = tempMat.M[j][0]; + tempMat.M[j][0] = temp; + + temp = tempMat.M[i][1]; + tempMat.M[i][1] = tempMat.M[j][1]; + tempMat.M[j][1] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + 
times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + p = i + 1 + cus_random() % (127 - i); // swap + + temp = tempMat.M[p][0]; + tempMat.M[p][0] = tempMat.M[i][0]; + tempMat.M[i][0] = temp; + + temp = tempMat.M[p][1]; + tempMat.M[p][1] = tempMat.M[i][1]; + tempMat.M[i][1] = temp; + + temp = (*Mat_inv).M[p][0]; + (*Mat_inv).M[p][0] = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = temp; + + temp = (*Mat_inv).M[p][1]; + (*Mat_inv).M[p][1] = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = temp; + + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + + for (t = i + 1; t < 128; t++) + { + if (cus_random() % 2) + { + tempMat.M[t][0] ^= tempMat.M[i][0]; + tempMat.M[t][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[t][1] ^= (*Mat_inv).M[i][1]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + else // can still contiune + { + for (k = i + 1; k < 128; k++) + { + if ((tempMat.M[k][0] & idM64[i]) == idM64[i]) + { + tempMat.M[k][0] ^= tempMat.M[i][0]; + tempMat.M[k][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + for (i = 64; i < 128; i++) // diagonal = 1? 
+ { + if ((tempMat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) + { + for (j = i + 1; j < 128; j++) + { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + tempMat.M[j][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 128; j++) + { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + temp = tempMat.M[i][1]; + tempMat.M[i][1] = tempMat.M[j][1]; + tempMat.M[j][1] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 127) + { + p = i + 1 + cus_random() % (127 - i); // swap + + temp = tempMat.M[p][1]; + tempMat.M[p][1] = tempMat.M[i][1]; + tempMat.M[i][1] = temp; + + temp = (*Mat_inv).M[p][0]; + (*Mat_inv).M[p][0] = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = temp; + + temp = (*Mat_inv).M[p][1]; + (*Mat_inv).M[p][1] = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = temp; + + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + + for (t = i + 1; t < 128; t++) + { + if (cus_random() % 2) + { + tempMat.M[t][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[t][1] ^= (*Mat_inv).M[i][1]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 128; k++) + { + if ((tempMat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) + { + tempMat.M[k][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= 
(*Mat_inv).M[i][1]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 127; t >= 64; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j][1] & idM64[t - 64]) == idM64[t - 64]) + { + tempMat.M[j][1] ^= tempMat.M[t][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[t][1]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + for (t = 63; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j][0] & idM64[t]) == idM64[t]) + { + tempMat.M[j][0] ^= tempMat.M[t][0]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[t][1]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse matrix + { + if (trail[j][0]) // add + { + (*Mat).M[trail[j][1]][0] ^= (*Mat).M[trail[j][2]][0]; + (*Mat).M[trail[j][1]][1] ^= (*Mat).M[trail[j][2]][1]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]][0]; + (*Mat).M[trail[j][1]][0] = (*Mat).M[trail[j][2]][0]; + (*Mat).M[trail[j][2]][0] = temp; + + temp = (*Mat).M[trail[j][1]][1]; + (*Mat).M[trail[j][1]][1] = (*Mat).M[trail[j][2]][1]; + (*Mat).M[trail[j][2]][1] = temp; + } + } + } + else // invertible + { + for (i = 127; i >= 64; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + tempMat.M[j][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + } + } + } + for (i = 63; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) + { + tempMat.M[j][0] ^= tempMat.M[i][0]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + } + } + } + copyM128(resultMat, Mat); + } +} +void genMatpairM256(M256 *Mat, M256 *Mat_inv) // 
generate 256*256 invertible matrix and its inverse matrix +{ + int i, j, t, k; + int p; + M256 tempMat; + M256 resultMat; + uint64_t temp; + uint8_t trail[65536][3]; // generate trail + int flag = 0; + int times = 0; + int invertible = 1; + // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + identityM256(Mat); + identityM256(Mat_inv); + randM256(&tempMat); + copyM256(tempMat, &resultMat); + for (i = 0; i < 64; i++) // diagonal = 1? + { + if ((tempMat.M[i][0] & idM64[i]) == idM64[i]) + { + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) + { + tempMat.M[j][0] ^= tempMat.M[i][0]; + tempMat.M[j][1] ^= tempMat.M[i][1]; + tempMat.M[j][2] ^= tempMat.M[i][2]; + tempMat.M[j][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) + { + temp = tempMat.M[i][0]; + tempMat.M[i][0] = tempMat.M[j][0]; + tempMat.M[j][0] = temp; + + temp = tempMat.M[i][1]; + tempMat.M[i][1] = tempMat.M[j][1]; + tempMat.M[j][1] = temp; + + temp = tempMat.M[i][2]; + tempMat.M[i][2] = tempMat.M[j][2]; + tempMat.M[j][2] = temp; + + temp = tempMat.M[i][3]; + tempMat.M[i][3] = tempMat.M[j][3]; + tempMat.M[j][3] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; 
+ times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + p = i + 1 + cus_random() % (255 - i); // swap + + temp = tempMat.M[p][0]; + tempMat.M[p][0] = tempMat.M[i][0]; + tempMat.M[i][0] = temp; + + temp = tempMat.M[p][1]; + tempMat.M[p][1] = tempMat.M[i][1]; + tempMat.M[i][1] = temp; + + temp = tempMat.M[p][2]; + tempMat.M[p][2] = tempMat.M[i][2]; + tempMat.M[i][2] = temp; + + temp = tempMat.M[p][3]; + tempMat.M[p][3] = tempMat.M[i][3]; + tempMat.M[i][3] = temp; + + temp = (*Mat_inv).M[p][0]; + (*Mat_inv).M[p][0] = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = temp; + + temp = (*Mat_inv).M[p][1]; + (*Mat_inv).M[p][1] = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = temp; + + temp = (*Mat_inv).M[p][2]; + (*Mat_inv).M[p][2] = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = temp; + + temp = (*Mat_inv).M[p][3]; + (*Mat_inv).M[p][3] = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = temp; + + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + + for (t = i + 1; t < 256; t++) + { + if (cus_random() % 2) + { + tempMat.M[t][0] ^= tempMat.M[i][0]; + tempMat.M[t][1] ^= tempMat.M[i][1]; + tempMat.M[t][2] ^= tempMat.M[i][2]; + tempMat.M[t][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[t][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[t][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[t][3] ^= (*Mat_inv).M[i][3]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + else // can still contiune + { + for (k = i + 1; k < 256; k++) + { + if ((tempMat.M[k][0] & idM64[i]) == idM64[i]) + { + tempMat.M[k][0] ^= tempMat.M[i][0]; + tempMat.M[k][1] ^= tempMat.M[i][1]; + tempMat.M[k][2] ^= tempMat.M[i][2]; + tempMat.M[k][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = k; 
+ trail[times][2] = i; + times++; + } + } + } + } + } + for (i = 64; i < 128; i++) // diagonal = 1? + { + if ((tempMat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) + { + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + tempMat.M[j][1] ^= tempMat.M[i][1]; + tempMat.M[j][2] ^= tempMat.M[i][2]; + tempMat.M[j][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + temp = tempMat.M[i][1]; + tempMat.M[i][1] = tempMat.M[j][1]; + tempMat.M[j][1] = temp; + + temp = tempMat.M[i][2]; + tempMat.M[i][2] = tempMat.M[j][2]; + tempMat.M[j][2] = temp; + + temp = tempMat.M[i][3]; + tempMat.M[i][3] = tempMat.M[j][3]; + tempMat.M[j][3] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + // if(i < 127) + { + p = i + 1 + cus_random() % (255 - i); // swap + + temp = tempMat.M[p][1]; + tempMat.M[p][1] = tempMat.M[i][1]; + tempMat.M[i][1] = temp; + + temp = tempMat.M[p][2]; + tempMat.M[p][2] = tempMat.M[i][2]; + tempMat.M[i][2] = temp; + + temp = tempMat.M[p][3]; + tempMat.M[p][3] = tempMat.M[i][3]; + tempMat.M[i][3] = 
temp; + + temp = (*Mat_inv).M[p][0]; + (*Mat_inv).M[p][0] = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = temp; + + temp = (*Mat_inv).M[p][1]; + (*Mat_inv).M[p][1] = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = temp; + + temp = (*Mat_inv).M[p][2]; + (*Mat_inv).M[p][2] = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = temp; + + temp = (*Mat_inv).M[p][3]; + (*Mat_inv).M[p][3] = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = temp; + + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + + for (t = i + 1; t < 256; t++) + { + if (cus_random() % 2) + { + tempMat.M[t][1] ^= tempMat.M[i][1]; + tempMat.M[t][2] ^= tempMat.M[i][2]; + tempMat.M[t][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[t][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[t][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[t][3] ^= (*Mat_inv).M[i][3]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 256; k++) + { + if ((tempMat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) + { + tempMat.M[k][1] ^= tempMat.M[i][1]; + tempMat.M[k][2] ^= tempMat.M[i][2]; + tempMat.M[k][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + for (i = 128; i < 192; i++) // diagonal = 1? 
+ { + if ((tempMat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) + { + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + tempMat.M[j][2] ^= tempMat.M[i][2]; + tempMat.M[j][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + temp = tempMat.M[i][2]; + tempMat.M[i][2] = tempMat.M[j][2]; + tempMat.M[j][2] = temp; + + temp = tempMat.M[i][3]; + tempMat.M[i][3] = tempMat.M[j][3]; + tempMat.M[j][3] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + // if(i < 127) + { + p = i + 1 + cus_random() % (255 - i); // swap + + temp = tempMat.M[p][2]; + tempMat.M[p][2] = tempMat.M[i][2]; + tempMat.M[i][2] = temp; + + temp = tempMat.M[p][3]; + tempMat.M[p][3] = tempMat.M[i][3]; + tempMat.M[i][3] = temp; + + temp = (*Mat_inv).M[p][0]; + (*Mat_inv).M[p][0] = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = temp; + + temp = (*Mat_inv).M[p][1]; + (*Mat_inv).M[p][1] = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = temp; + + temp = (*Mat_inv).M[p][2]; + (*Mat_inv).M[p][2] = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = 
temp; + + temp = (*Mat_inv).M[p][3]; + (*Mat_inv).M[p][3] = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = temp; + + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + + for (t = i + 1; t < 256; t++) + { + if (cus_random() % 2) + { + tempMat.M[t][2] ^= tempMat.M[i][2]; + tempMat.M[t][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[t][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[t][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[t][3] ^= (*Mat_inv).M[i][3]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 256; k++) + { + if ((tempMat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) + { + tempMat.M[k][2] ^= tempMat.M[i][2]; + tempMat.M[k][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + for (i = 192; i < 256; i++) // diagonal = 1? 
+ { + if ((tempMat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) + { + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + tempMat.M[j][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = i; + times++; + } + } + } + else // swap to find 1 + { + flag = 1; + for (j = i + 1; j < 256; j++) + { + if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + temp = tempMat.M[i][3]; + tempMat.M[i][3] = tempMat.M[j][3]; + tempMat.M[j][3] = temp; + + flag = 0; + + temp = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = (*Mat_inv).M[j][0]; + (*Mat_inv).M[j][0] = temp; + + temp = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = (*Mat_inv).M[j][1]; + (*Mat_inv).M[j][1] = temp; + + temp = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = (*Mat_inv).M[j][2]; + (*Mat_inv).M[j][2] = temp; + + temp = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = (*Mat_inv).M[j][3]; + (*Mat_inv).M[j][3] = temp; + + trail[times][0] = 0; + trail[times][1] = j; + trail[times][2] = i; + times++; + break; + } + } + if (flag) // can not find 1 which means not invertible + { + invertible = 0; + if (i < 255) + { + p = i + 1 + cus_random() % (255 - i); // swap + + temp = tempMat.M[p][3]; + tempMat.M[p][3] = tempMat.M[i][3]; + tempMat.M[i][3] = temp; + + temp = (*Mat_inv).M[p][0]; + (*Mat_inv).M[p][0] = (*Mat_inv).M[i][0]; + (*Mat_inv).M[i][0] = temp; + + temp = (*Mat_inv).M[p][1]; + (*Mat_inv).M[p][1] = (*Mat_inv).M[i][1]; + (*Mat_inv).M[i][1] = temp; + + temp = (*Mat_inv).M[p][2]; + (*Mat_inv).M[p][2] = (*Mat_inv).M[i][2]; + (*Mat_inv).M[i][2] = temp; + + temp = (*Mat_inv).M[p][3]; + (*Mat_inv).M[p][3] = (*Mat_inv).M[i][3]; + (*Mat_inv).M[i][3] = temp; + + trail[times][0] = 0; + trail[times][1] = p; + trail[times][2] = i; + times++; + + for (t = i + 1; t < 256; t++) + 
{ + if (cus_random() % 2) + { + tempMat.M[t][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[t][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[t][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[t][3] ^= (*Mat_inv).M[i][3]; + trail[times][0] = 1; + trail[times][1] = t; + trail[times][2] = i; + times++; + } + } + } + } + else // can still contiune + { + for (k = i + 1; k < 256; k++) + { + if ((tempMat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) + { + tempMat.M[k][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[k][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[k][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[k][3] ^= (*Mat_inv).M[i][3]; + + trail[times][0] = 1; + trail[times][1] = k; + trail[times][2] = i; + times++; + } + } + } + } + } + if (!invertible) // not invertible + { + for (t = 255; t >= 192; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j][3] & idM64[t - 192]) == idM64[t - 192]) + { + tempMat.M[j][3] ^= tempMat.M[t][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[t][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[t][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[t][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + for (t = 191; t >= 128; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j][2] & idM64[t - 128]) == idM64[t - 128]) + { + tempMat.M[j][2] ^= tempMat.M[t][2]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[t][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[t][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[t][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + for (t = 127; t >= 64; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j][1] & idM64[t - 64]) == idM64[t - 64]) + { + tempMat.M[j][1] ^= tempMat.M[t][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[t][1]; + 
(*Mat_inv).M[j][2] ^= (*Mat_inv).M[t][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[t][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + for (t = 63; t >= 0; t--) + { + for (j = t - 1; j >= 0; j--) + { + if ((tempMat.M[j][0] & idM64[t]) == idM64[t]) + { + tempMat.M[j][0] ^= tempMat.M[t][0]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[t][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[t][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[t][3]; + + trail[times][0] = 1; + trail[times][1] = j; + trail[times][2] = t; + times++; + } + } + } + + for (j = times - 1; j >= 0; j--) // generate inverse matrix + { + if (trail[j][0]) // add + { + (*Mat).M[trail[j][1]][0] ^= (*Mat).M[trail[j][2]][0]; + (*Mat).M[trail[j][1]][1] ^= (*Mat).M[trail[j][2]][1]; + (*Mat).M[trail[j][1]][2] ^= (*Mat).M[trail[j][2]][2]; + (*Mat).M[trail[j][1]][3] ^= (*Mat).M[trail[j][2]][3]; + } + else // swap + { + temp = (*Mat).M[trail[j][1]][0]; + (*Mat).M[trail[j][1]][0] = (*Mat).M[trail[j][2]][0]; + (*Mat).M[trail[j][2]][0] = temp; + + temp = (*Mat).M[trail[j][1]][1]; + (*Mat).M[trail[j][1]][1] = (*Mat).M[trail[j][2]][1]; + (*Mat).M[trail[j][2]][1] = temp; + + temp = (*Mat).M[trail[j][1]][2]; + (*Mat).M[trail[j][1]][2] = (*Mat).M[trail[j][2]][2]; + (*Mat).M[trail[j][2]][2] = temp; + + temp = (*Mat).M[trail[j][1]][3]; + (*Mat).M[trail[j][1]][3] = (*Mat).M[trail[j][2]][3]; + (*Mat).M[trail[j][2]][3] = temp; + } + } + } + else // invertible + { + for (i = 255; i >= 192; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) + { + tempMat.M[j][3] ^= tempMat.M[i][3]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + for (i = 191; i >= 128; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) + { + 
tempMat.M[j][2] ^= tempMat.M[i][2]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + for (i = 127; i >= 64; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) + { + tempMat.M[j][1] ^= tempMat.M[i][1]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + for (i = 63; i >= 0; i--) + { + for (j = i - 1; j >= 0; j--) + { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) + { + tempMat.M[j][0] ^= tempMat.M[i][0]; + + (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; + (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; + (*Mat_inv).M[j][2] ^= (*Mat_inv).M[i][2]; + (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; + } + } + } + copyM256(resultMat, Mat); + } +} +void genaffinepairM4(Aff4 *aff, Aff4 *aff_inv) // generate a pair of affine +{ + genMatpairM4(&(aff->Mat), &(aff_inv->Mat)); + randV4(&(aff->Vec)); + MatMulVecM4((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); +} +void genaffinepairM8(Aff8 *aff, Aff8 *aff_inv) // generate a pair of affine +{ + genMatpairM8(&(aff->Mat), &(aff_inv->Mat)); + randV8(&(aff->Vec)); + MatMulVecM8((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); +} +void genaffinepairM16(Aff16 *aff, Aff16 *aff_inv) // generate a pair of affine +{ + genMatpairM16(&(aff->Mat), &(aff_inv->Mat)); + randV16(&(aff->Vec)); + MatMulVecM16((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); +} +void genaffinepairM32(Aff32 *aff, Aff32 *aff_inv) // generate a pair of affine +{ + genMatpairM32(&(aff->Mat), &(aff_inv->Mat)); + randV32(&(aff->Vec)); + MatMulVecM32((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); +} +void genaffinepairM64(Aff64 *aff, Aff64 *aff_inv) // generate a pair of affine +{ + genMatpairM64(&(aff->Mat), &(aff_inv->Mat)); + randV64(&(aff->Vec)); + 
MatMulVecM64((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); +} +void genaffinepairM128(Aff128 *aff, Aff128 *aff_inv) // generate a pair of affine +{ + genMatpairM128(&(aff->Mat), &(aff_inv->Mat)); + randV128(&(aff->Vec)); + MatMulVecM128((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); +} +void MatrixcomM8to32(M8 m1, M8 m2, M8 m3, M8 m4, M32 *mat) // diagonal matrix concatenation, four 8*8 -> 32*32 +{ + int i; + int j = 0; + uint8_t *m; + initM32(mat); + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 3) = m1.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 2) = m2.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 1) = m3.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *m = m4.M[i]; + j++; + } +} +void VectorcomV8to32(V8 v1, V8 v2, V8 v3, V8 v4, V32 *vec) // 4 vectors concatenation +{ + uint8_t *v; + v = (uint8_t *)&(*vec).V; + *(v + 3) = v1.V; + *(v + 2) = v2.V; + *(v + 1) = v3.V; + *v = v4.V; +} +void affinecomM8to32(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff32 *aff) // diagonal affine concatenation, four 8*8 -> 32*32 +{ + MatrixcomM8to32(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, &(aff->Mat)); + VectorcomV8to32(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, &(aff->Vec)); +} +void MatrixcomM16to64(M16 m1, M16 m2, M16 m3, M16 m4, M64 *mat) // diagonal matrix concatenation, four 16*16 -> 64*64 +{ + int i; + int j = 0; + uint16_t *m; + initM64(mat); + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j]; + *(m + 3) = m1.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j]; + *(m + 2) = m2.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j]; + *(m + 1) = m3.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j]; + *m = m4.M[i]; + j++; + } +} +void VectorcomV16to64(V16 v1, V16 v2, V16 v3, V16 v4, V64 *vec) // 4 vectors concatenation +{ + uint16_t *v; + v = (uint16_t 
*)&(*vec).V; + *(v + 3) = v1.V; + *(v + 2) = v2.V; + *(v + 1) = v3.V; + *v = v4.V; +} +void affinecomM16to64(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff64 *aff) // diagonal affine concatenation,four 16*16 -> 64*64 +{ + MatrixcomM16to64(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, &(aff->Mat)); + VectorcomV16to64(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, &(aff->Vec)); +} +void MatrixcomM8to64(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M64 *mat) // diagonal matrix concatenation,four 8*8 -> 64*64 +{ + int i; + int j = 0; + uint8_t *m; + initM64(mat); + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 7) = m1.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 6) = m2.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 5) = m3.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 4) = m4.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 3) = m5.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 2) = m6.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *(m + 1) = m7.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j]; + *m = m8.M[i]; + j++; + } +} +void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V64 *vec) // 8 vectors concatenation +{ + uint8_t *v; + v = (uint8_t *)&(*vec).V; + *(v + 7) = v1.V; + *(v + 6) = v2.V; + *(v + 5) = v3.V; + *(v + 4) = v4.V; + *(v + 3) = v5.V; + *(v + 2) = v6.V; + *(v + 1) = v7.V; + *v = v8.V; +} +void affinecomM8to64(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff64 *aff) // diagonal affine concatenation, four 8*8 -> 64*64 +{ + MatrixcomM8to64(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, aff7.Mat, aff8.Mat, &(aff->Mat)); + VectorcomV8to64(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, 
aff7.Vec, aff8.Vec, &(aff->Vec)); +} +void MatrixcomM32to128(M32 m1, M32 m2, M32 m3, M32 m4, M128 *mat) // diagonal matrix concatenation, four 32*32 -> 128*128 +{ + int i; + int j = 0; + uint32_t *m; + initM128(mat); + for (i = 0; i < 32; i++) + { + m = (uint32_t *)&(*mat).M[j][0]; + *(m + 1) = m1.M[i]; + j++; + } + for (i = 0; i < 32; i++) + { + m = (uint32_t *)&(*mat).M[j][0]; + *m = m2.M[i]; + j++; + } + for (i = 0; i < 32; i++) + { + m = (uint32_t *)&(*mat).M[j][1]; + *(m + 1) = m3.M[i]; + j++; + } + for (i = 0; i < 32; i++) + { + m = (uint32_t *)&(*mat).M[j][1]; + *m = m4.M[i]; + j++; + } +} +void VectorcomV32to128(V32 v1, V32 v2, V32 v3, V32 v4, V128 *vec) // 4 vectors concatenation +{ + uint32_t *v; + v = (uint32_t *)&(*vec).V[0]; + *(v + 1) = v1.V; + *v = v2.V; + v = (uint32_t *)&(*vec).V[1]; + *(v + 1) = v3.V; + *v = v4.V; +} +void affinecomM32to128(Aff32 aff1, Aff32 aff2, Aff32 aff3, Aff32 aff4, Aff128 *aff) // diagonal affine concatenation, four 32*32 -> 128*128 +{ + MatrixcomM32to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, &(aff->Mat)); + VectorcomV32to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, &(aff->Vec)); +} +void MatrixcomM8to128(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M8 m9, M8 m10, M8 m11, M8 m12, M8 m13, M8 m14, M8 m15, M8 m16, M128 *mat) // diagonal matrix concatenation, 16 8*8 -> 128*128 +{ + int i; + int j = 0; + uint8_t *m; + initM128(mat); + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 7) = m1.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 6) = m2.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 5) = m3.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 4) = m4.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 3) = m5.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 2) = m6.M[i]; + j++; + } + for (i = 0; 
i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *(m + 1) = m7.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][0]; + *m = m8.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 7) = m9.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 6) = m10.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 5) = m11.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 4) = m12.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 3) = m13.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 2) = m14.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *(m + 1) = m15.M[i]; + j++; + } + for (i = 0; i < 8; i++) + { + m = (uint8_t *)&(*mat).M[j][1]; + *m = m16.M[i]; + j++; + } +} +void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V8 v9, V8 v10, V8 v11, V8 v12, V8 v13, V8 v14, V8 v15, V8 v16, V128 *vec) // 16 vectors concatenation +{ + uint8_t *v; + v = (uint8_t *)&(*vec).V[0]; + *(v + 7) = v1.V; + *(v + 6) = v2.V; + *(v + 5) = v3.V; + *(v + 4) = v4.V; + *(v + 3) = v5.V; + *(v + 2) = v6.V; + *(v + 1) = v7.V; + *v = v8.V; + v = (uint8_t *)&(*vec).V[1]; + *(v + 7) = v9.V; + *(v + 6) = v10.V; + *(v + 5) = v11.V; + *(v + 4) = v12.V; + *(v + 3) = v13.V; + *(v + 2) = v14.V; + *(v + 1) = v15.V; + *v = v16.V; +} +void affinecomM8to128(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff8 aff9, Aff8 aff10, Aff8 aff11, Aff8 aff12, Aff8 aff13, Aff8 aff14, Aff8 aff15, Aff8 aff16, Aff128 *aff) // diagonal affine concatenation, 16 8*8 -> 128*128 +{ + MatrixcomM8to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, aff7.Mat, aff8.Mat, aff9.Mat, aff10.Mat, aff11.Mat, aff12.Mat, aff13.Mat, aff14.Mat, aff15.Mat, aff16.Mat, &(aff->Mat)); + 
VectorcomV8to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, aff7.Vec, aff8.Vec, aff9.Vec, aff10.Vec, aff11.Vec, aff12.Vec, aff13.Vec, aff14.Vec, aff15.Vec, aff16.Vec, &(aff->Vec)); +} +void MatrixcomM16to128(M16 m1, M16 m2, M16 m3, M16 m4, M16 m5, M16 m6, M16 m7, M16 m8, M128 *mat) // diagonal matrix concatenation, 8 16*16 -> 128*128 +{ + int i; + int j = 0; + uint16_t *m; + initM128(mat); + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][0]; + *(m + 3) = m1.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][0]; + *(m + 2) = m2.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][0]; + *(m + 1) = m3.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][0]; + *m = m4.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][1]; + *(m + 3) = m5.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][1]; + *(m + 2) = m6.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][1]; + *(m + 1) = m7.M[i]; + j++; + } + for (i = 0; i < 16; i++) + { + m = (uint16_t *)&(*mat).M[j][1]; + *m = m8.M[i]; + j++; + } +} +void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, V16 v8, V128 *vec) // 8 vectors concatenation +{ + uint16_t *v; + v = (uint16_t *)&(*vec).V[0]; + *(v + 3) = v1.V; + *(v + 2) = v2.V; + *(v + 1) = v3.V; + *v = v4.V; + v = (uint16_t *)&(*vec).V[1]; + *(v + 3) = v5.V; + *(v + 2) = v6.V; + *(v + 1) = v7.V; + *v = v8.V; +} +void affinecomM16to128(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff16 aff5, Aff16 aff6, Aff16 aff7, Aff16 aff8, Aff128 *aff) // diagonal affine concatenation, 8 16*16 -> 128*128 +{ + MatrixcomM16to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, aff7.Mat, aff8.Mat, &(aff->Mat)); + VectorcomV16to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, aff7.Vec, aff8.Vec, &(aff->Vec)); +} +void MattransM4(M4 Mat, M4 
*Mat_trans) // matrix transposition M4: butterfly transpose of a 4x4 bit matrix
{
    int i, j;
    uint8_t mask[2], k, k2, l, temp;
    // stage masks: 0x5 exchanges 1-bit sub-blocks, 0x3 exchanges 2-bit sub-blocks
    mask[0] = 0x5;
    mask[1] = 0x3;
    for (j = 0; j < 2; j++)
    {
        k = 1 << j;   // width (and row distance) of the blocks swapped this stage
        k2 = k * 2;
        for (i = 0; i < 2; i++)
        {
            l = (k2 * i) % 3; // upper row of the row pair handled this pass
            temp = ((Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k)) & 0x0f;
            Mat.M[l + k] = ((Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k)) & 0x0f;
            Mat.M[l] = temp;
        }
    }
    copyM4(Mat, Mat_trans); // Mat is a by-value copy, so the caller's input is untouched
}
void MattransM8(M8 Mat, M8 *Mat_trans) // matrix transposition M8 (3-stage butterfly)
{
    int i, j;
    uint8_t mask[3], k, k2, l, temp;
    mask[0] = 0x55;
    mask[1] = 0x33;
    mask[2] = 0x0f;
    for (j = 0; j < 3; j++)
    {
        k = 1 << j;
        k2 = k * 2;
        for (i = 0; i < 4; i++)
        {
            // % (2n-1) walks the upper rows of all pairs at stride k2
            l = (k2 * i) % 7;
            temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k);
            Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k);
            Mat.M[l] = temp;
        }
    }
    copyM8(Mat, Mat_trans);
}
void MattransM16(M16 Mat, M16 *Mat_trans) // matrix transposition M16 (4-stage butterfly)
{
    int i, j;
    uint16_t mask[4], k, k2, l, temp;
    mask[0] = 0x5555;
    mask[1] = 0x3333;
    mask[2] = 0x0f0f;
    mask[3] = 0x00ff;
    for (j = 0; j < 4; j++)
    {
        k = 1 << j;
        k2 = k * 2;
        for (i = 0; i < 8; i++)
        {
            l = (k2 * i) % 15;
            temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k);
            Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k);
            Mat.M[l] = temp;
        }
    }
    copyM16(Mat, Mat_trans);
}
void MattransM32(M32 Mat, M32 *Mat_trans) // matrix transposition M32 (5-stage butterfly)
{
    int i, j;
    uint32_t mask[5], k, k2, l, temp;
    mask[0] = 0x55555555;
    mask[1] = 0x33333333;
    mask[2] = 0x0f0f0f0f;
    mask[3] = 0x00ff00ff;
    mask[4] = 0x0000ffff;
    for (j = 0; j < 5; j++)
    {
        k = 1 << j;
        k2 = k * 2;
        for (i = 0; i < 16; i++)
        {
            l = (k2 * i) % 31;
            temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k);
            Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k);
            Mat.M[l] = temp;
        }
    }
    copyM32(Mat, Mat_trans);
}
void MattransM64(M64 Mat, M64 *Mat_trans) // matrix transposition M64 (6-stage butterfly)
{
    int i, j;
    uint64_t mask[6], k, k2, l, temp;
    mask[0] = 0x5555555555555555;
    mask[1] = 0x3333333333333333;
    mask[2] = 0x0f0f0f0f0f0f0f0f;
    mask[3] = 0x00ff00ff00ff00ff;
    mask[4] = 0x0000ffff0000ffff;
    mask[5] = 0x00000000ffffffff;
    for (j = 0; j < 6; j++)
    {
        k = 1 << j;
        k2 = k * 2;
        for (i = 0; i < 32; i++)
        {
            l = (k2 * i) % 63;
            temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k);
            Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k);
            Mat.M[l] = temp;
        }
    }
    copyM64(Mat, Mat_trans);
}
void MattransM128(M128 Mat, M128 *Mat_trans) // matrix transposition M128
{
    int i, j;
    uint64_t mask[6], k, k2, l, temp;
    mask[0] = 0x5555555555555555;
    mask[1] = 0x3333333333333333;
    mask[2] = 0x0f0f0f0f0f0f0f0f;
    mask[3] = 0x00ff00ff00ff00ff;
    mask[4] = 0x0000ffff0000ffff;
    mask[5] = 0x00000000ffffffff;
    // 64-bit butterfly stages applied to each word column independently
    for (j = 0; j < 6; j++)
    {
        k = 1 << j;
        k2 = k * 2;
        for (i = 0; i < 64; i++)
        {
            l = (k2 * i) % 127;
            temp = (Mat.M[l][0] & ~mask[j]) ^ ((Mat.M[l + k][0] & ~mask[j]) >> k);
            Mat.M[l + k][0] = (Mat.M[l + k][0] & mask[j]) ^ ((Mat.M[l][0] & mask[j]) << k);
            Mat.M[l][0] = temp;

            temp = (Mat.M[l][1] & ~mask[j]) ^ ((Mat.M[l + k][1] & ~mask[j]) >> k);
            Mat.M[l + k][1] = (Mat.M[l + k][1] & mask[j]) ^ ((Mat.M[l][1] & mask[j]) << k);
            Mat.M[l][1] = temp;
        }
    }
    // final step: exchange the two off-diagonal 64x64 blocks
    for (i = 0; i < 64; i++)
    {
        temp = Mat.M[i + 64][0];
        Mat.M[i + 64][0] = Mat.M[i][1];
        Mat.M[i][1] = temp;
    }
    copyM128(Mat, Mat_trans);
}
// NOTE(review): definition continues beyond this chunk of the file.
void MattransM256(M256 Mat, M256 *Mat_trans) // matrix transposition M256 (original comment said M128 - copy/paste slip)
{
    int i, j;
    uint64_t mask[6], k, k2, l, temp;
    mask[0] = 0x5555555555555555;
    mask[1] = 0x3333333333333333;
    mask[2] = 0x0f0f0f0f0f0f0f0f;
    mask[3] = 0x00ff00ff00ff00ff;
    mask[4] = 0x0000ffff0000ffff;
    mask[5] = 0x00000000ffffffff;
    for (j = 0; j < 6; j++)
    {
        k = 1 << j;
        k2 = k * 2;
        for (i = 0; i < 128; i++)
        {
            l = (k2 * i) % 255;
            temp = (Mat.M[l][0] & ~mask[j]) ^
((Mat.M[l + k][0] & ~mask[j]) >> k); + Mat.M[l + k][0] = (Mat.M[l + k][0] & mask[j]) ^ ((Mat.M[l][0] & mask[j]) << k); + Mat.M[l][0] = temp; + + temp = (Mat.M[l][1] & ~mask[j]) ^ ((Mat.M[l + k][1] & ~mask[j]) >> k); + Mat.M[l + k][1] = (Mat.M[l + k][1] & mask[j]) ^ ((Mat.M[l][1] & mask[j]) << k); + Mat.M[l][1] = temp; + + temp = (Mat.M[l][2] & ~mask[j]) ^ ((Mat.M[l + k][2] & ~mask[j]) >> k); + Mat.M[l + k][2] = (Mat.M[l + k][2] & mask[j]) ^ ((Mat.M[l][2] & mask[j]) << k); + Mat.M[l][2] = temp; + + temp = (Mat.M[l][3] & ~mask[j]) ^ ((Mat.M[l + k][3] & ~mask[j]) >> k); + Mat.M[l + k][3] = (Mat.M[l + k][3] & mask[j]) ^ ((Mat.M[l][3] & mask[j]) << k); + Mat.M[l][3] = temp; + } + } + for (i = 0; i < 64; i++) + { + temp = Mat.M[i + 64][0]; + Mat.M[i + 64][0] = Mat.M[i][1]; + Mat.M[i][1] = temp; + + temp = Mat.M[i + 64][2]; + Mat.M[i + 64][2] = Mat.M[i][3]; + Mat.M[i][3] = temp; + + temp = Mat.M[i + 192][0]; + Mat.M[i + 192][0] = Mat.M[i + 128][1]; + Mat.M[i + 128][1] = temp; + + temp = Mat.M[i + 192][2]; + Mat.M[i + 192][2] = Mat.M[i + 128][3]; + Mat.M[i + 128][3] = temp; + } + for (i = 0; i < 128; i++) + { + temp = Mat.M[i + 128][0]; + Mat.M[i + 128][0] = Mat.M[i][2]; + Mat.M[i][2] = temp; + + temp = Mat.M[i + 128][1]; + Mat.M[i + 128][1] = Mat.M[i][3]; + Mat.M[i][3] = temp; + } + copyM256(Mat, Mat_trans); +} +void MatAddMatM4(M4 Mat1, M4 Mat2, M4 *Mat) +{ + int i; + for (i = 0; i < 4; i++) + { + (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; + } +} +void MatAddMatM8(M8 Mat1, M8 Mat2, M8 *Mat) +{ + int i; + for (i = 0; i < 8; i++) + { + (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; + } +} +void MatAddMatM16(M16 Mat1, M16 Mat2, M16 *Mat) +{ + int i; + for (i = 0; i < 16; i++) + { + (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; + } +} +void MatAddMatM32(M32 Mat1, M32 Mat2, M32 *Mat) +{ + int i; + for (i = 0; i < 32; i++) + { + (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; + } +} +void MatAddMatM64(M64 Mat1, M64 Mat2, M64 *Mat) +{ + int i; + for (i = 0; i < 64; i++) + { + (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; 
+ } +} +void MatAddMatM128(M128 Mat1, M128 Mat2, M128 *Mat) +{ + int i; + for (i = 0; i < 128; i++) + { + (*Mat).M[i][0] = Mat1.M[i][0] ^ Mat2.M[i][0]; + (*Mat).M[i][1] = Mat1.M[i][1] ^ Mat2.M[i][1]; + } +} +void MatAddMatM256(M256 Mat1, M256 Mat2, M256 *Mat) +{ + for (int i = 0; i < 256; i++) + { + (*Mat).M[i][0] = Mat1.M[i][0] ^ Mat2.M[i][0]; + (*Mat).M[i][1] = Mat1.M[i][1] ^ Mat2.M[i][1]; + (*Mat).M[i][2] = Mat1.M[i][2] ^ Mat2.M[i][2]; + (*Mat).M[i][3] = Mat1.M[i][3] ^ Mat2.M[i][3]; + } +} +void MatMulMatM4(M4 Mat1, M4 Mat2, M4 *Mat) // matrix multiplication 4*4 mul 4*4 -> 4*4 +{ + int i, j; + M4 Mat2_trans; + initM4(Mat); + MattransM4(Mat2, &Mat2_trans); + for (i = 0; i < 4; i++) + { + for (j = 0; j < 4; j++) + { + if (xorU4(Mat1.M[i] & Mat2_trans.M[j] & 0x0f)) + (*Mat).M[i] ^= idM4[j]; + } + } +} +void MatMulMatM8(M8 Mat1, M8 Mat2, M8 *Mat) // matrix multiplication 8*8 mul 8*8 -> 8*8 +{ + int i, j; + M8 Mat2_trans; + initM8(Mat); + MattransM8(Mat2, &Mat2_trans); + for (i = 0; i < 8; i++) + { + for (j = 0; j < 8; j++) + { + if (xorU8(Mat1.M[i] & Mat2_trans.M[j])) + (*Mat).M[i] ^= idM8[j]; + } + } +} +void MatMulMatM16(M16 Mat1, M16 Mat2, M16 *Mat) // matrix multiplication 16*16 mul 16*16 -> 16*16 +{ + int i, j; + M16 Mat2_trans; + initM16(Mat); + MattransM16(Mat2, &Mat2_trans); + for (i = 0; i < 16; i++) + { + for (j = 0; j < 16; j++) + { + if (xorU16(Mat1.M[i] & Mat2_trans.M[j])) + (*Mat).M[i] ^= idM16[j]; + } + } +} +void MatMulMatM32(M32 Mat1, M32 Mat2, M32 *Mat) // matrix multiplication 32*32 mul 32*32 -> 32*32 +{ + int i, j; + M32 Mat2_trans; + initM32(Mat); + MattransM32(Mat2, &Mat2_trans); + for (i = 0; i < 32; i++) + { + for (j = 0; j < 32; j++) + { + if (xorU32(Mat1.M[i] & Mat2_trans.M[j])) + (*Mat).M[i] ^= idM32[j]; + } + } +} +void MatMulMatM64(M64 Mat1, M64 Mat2, M64 *Mat) // matrix multiplication 64*64 mul 64*64 -> 64*64 +{ + int i, j; + M64 Mat2_trans; + initM64(Mat); + MattransM64(Mat2, &Mat2_trans); + for (i = 0; i < 64; i++) + { + for (j = 0; j 
< 64; j++) + { + if (xorU64(Mat1.M[i] & Mat2_trans.M[j])) + (*Mat).M[i] ^= idM64[j]; + } + } +} +void MatMulMatM128(M128 Mat1, M128 Mat2, M128 *Mat) // matrix multiplication 128*128 mul 128*128 -> 128*128 +{ + int i, j; + M128 Mat2_trans; + uint64_t temp[2]; + initM128(Mat); + MattransM128(Mat2, &Mat2_trans); + for (i = 0; i < 128; i++) + { + for (j = 0; j < 64; j++) + { + temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; + temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; + if (xorU128(temp)) + (*Mat).M[i][0] ^= idM64[j]; + } + for (j = 64; j < 128; j++) + { + temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; + temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; + if (xorU128(temp)) + (*Mat).M[i][1] ^= idM64[j - 64]; + } + } +} +void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) // matrix multiplication 256*256 mul 256*256 -> 256*256 +{ + int i, j; + M256 Mat2_trans; + uint64_t temp[4]; + initM256(Mat); + MattransM256(Mat2, &Mat2_trans); + for (i = 0; i < 256; i++) + { + for (j = 0; j < 64; j++) + { + temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; + temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; + temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; + temp[3] = Mat1.M[i][3] & Mat2_trans.M[j][3]; + if (xorU256(temp)) + (*Mat).M[i][0] ^= idM64[j]; + } + for (j = 64; j < 128; j++) + { + temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; + temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; + temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; + temp[3] = Mat1.M[i][3] & Mat2_trans.M[j][3]; + if (xorU256(temp)) + (*Mat).M[i][1] ^= idM64[j - 64]; + } + for (j = 128; j < 192; j++) + { + temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; + temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; + temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; + temp[3] = Mat1.M[i][3] & Mat2_trans.M[j][3]; + if (xorU256(temp)) + (*Mat).M[i][2] ^= idM64[j - 128]; + } + for (j = 192; j < 256; j++) + { + temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; + temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; + temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; + temp[3] = 
Mat1.M[i][3] & Mat2_trans.M[j][3]; + if (xorU256(temp)) + (*Mat).M[i][3] ^= idM64[j - 192]; + } + } +} +void affinemixM4(Aff4 aff, Aff4 preaff_inv, Aff4 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +{ + MatMulMatM4(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); + MatMulVecM4(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); + (*mixaff).Vec.V ^= aff.Vec.V; +} +void affinemixM8(Aff8 aff, Aff8 preaff_inv, Aff8 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +{ + MatMulMatM8(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); + MatMulVecM8(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); + (*mixaff).Vec.V ^= aff.Vec.V; +} +void affinemixM16(Aff16 aff, Aff16 preaff_inv, Aff16 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +{ + MatMulMatM16(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); + MatMulVecM16(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); + (*mixaff).Vec.V ^= aff.Vec.V; +} +void affinemixM32(Aff32 aff, Aff32 preaff_inv, Aff32 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +{ + MatMulMatM32(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); + MatMulVecM32(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); + (*mixaff).Vec.V ^= aff.Vec.V; +} +void affinemixM64(Aff64 aff, Aff64 preaff_inv, Aff64 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +{ + MatMulMatM64(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); + MatMulVecM64(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); + (*mixaff).Vec.V ^= aff.Vec.V; +} +void affinemixM128(Aff128 aff, Aff128 preaff_inv, Aff128 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +{ + MatMulMatM128(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); + MatMulVecM128(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); + (*mixaff).Vec.V[0] ^= aff.Vec.V[0]; + (*mixaff).Vec.V[1] ^= aff.Vec.V[1]; +} \ No newline at end of file diff --git a/crypto/sm4/wb/WBMatrix.h b/crypto/sm4/wb/WBMatrix.h new file mode 
100644 index 000000000..377661913 --- /dev/null +++ b/crypto/sm4/wb/WBMatrix.h @@ -0,0 +1,314 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF/WBMatrix. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#ifndef _HWBMATRIX_H_ +#define _HWBMATRIX_H_ + +#include +#include +#include + +#include "crypto/wbstructure.h" +#include "WBRandom.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +void SetRandSeed(unsigned int seed);//Set random seed +/* +* 4-bit Matrix operation +*/ + +void initM4(M4 *Mat); +void randM4(M4 *Mat); +void identityM4(M4 *Mat); +void printM4(M4 Mat); +void printbitM4(M4 Mat); +void copyM4(M4 Mat1, M4 *Mat2); +int isequalM4(M4 Mat1, M4 Mat2); +int isinvertM4(M4 Mat); +void invsM4(M4 Mat, M4 *Mat_inv); +int readbitM4(M4 Mat, int i, int j); +void flipbitM4(M4 *Mat, int i, int j); +void setbitM4(M4 *Mat, int i, int j, int bit); + +void initV4(V4 *Vec); +void randV4(V4 *Vec); +void printV4(V4 Vec); +int isequalV4(V4 Vec1, V4 Vec2); +void VecAddVecV4(V4 Vec1, V4 Vec2, V4 *Vec); + +uint8_t affineU4(Aff4 aff, uint8_t arr); +int xorU4(uint8_t n); +int HWU4(uint8_t n); + +void MatMulVecM4(M4 Mat,V4 Vec, V4 *ans); +uint8_t MatMulNumM4(M4 Mat, uint8_t n); +void MatMulMatM4(M4 Mat1, M4 Mat2, M4 *Mat); +void MatAddMatM4(M4 Mat1, M4 Mat2, M4 *Mat); +void MattransM4(M4 Mat, M4 *Mat_trans); + +void genMatpairM4(M4 *Mat, M4 *Mat_inv); +void genaffinepairM4(Aff4 *aff, Aff4 *aff_inv); +void affinemixM4(Aff4 aff, Aff4 preaff_inv, Aff4 *mixaff); + +/* +* 8-bit Matrix operation +*/ + +void initM8(M8 *Mat); +void randM8(M8 *Mat); +void identityM8(M8 *Mat); +void printM8(M8 Mat); +void printbitM8(M8 Mat); +void copyM8(M8 Mat1, M8 *Mat2); 
+int isequalM8(M8 Mat1, M8 Mat2); +int isinvertM8(M8 Mat); +void invsM8(M8 Mat, M8 *Mat_inv); +int readbitM8(M8 Mat, int i, int j); +void flipbitM8(M8 *Mat, int i, int j); +void setbitM8(M8 *Mat, int i, int j, int bit); + +void initV8(V8 *Vec); +void randV8(V8 *Vec); +void printV8(V8 Vec); +int isequalV8(V8 Vec1, V8 Vec2); +void VecAddVecV8(V8 Vec1, V8 Vec2, V8 *Vec); + +uint8_t affineU8(Aff8 aff, uint8_t arr); +int xorU8(uint8_t n); +int HWU8(uint8_t n); +void printU8(uint8_t n); + +void MatMulVecM8(M8 Mat,V8 Vec, V8 *ans); +uint8_t MatMulNumM8(M8 Mat, uint8_t n); +void MatMulMatM8(M8 Mat1, M8 Mat2, M8 *Mat); +void MatAddMatM8(M8 Mat1, M8 Mat2, M8 *Mat); +void MattransM8(M8 Mat, M8 *Mat_trans); + +void genMatpairM8(M8 *Mat, M8 *Mat_inv); +void genaffinepairM8(Aff8 *aff, Aff8 *aff_inv); +void affinemixM8(Aff8 aff, Aff8 preaff_inv, Aff8 *mixaff); + +/* +* 16-bit Matrix operation +*/ + +void initM16(M16 *Mat); +void randM16(M16 *Mat); +void identityM16(M16 *Mat); +void printM16(M16 Mat); +void printbitM16(M16 Mat); +void copyM16(M16 Mat1, M16 *Mat2); +int isequalM16(M16 Mat1, M16 Mat2); +int isinvertM16(M16 Mat); +void invsM16(M16 Mat, M16 *Mat_inv); +int readbitM16(M16 Mat, int i, int j); +void flipbitM16(M16 *Mat, int i, int j); +void setbitM16(M16 *Mat, int i, int j, int bit); + +void initV16(V16 *Vec); +void randV16(V16 *Vec); +void printV16(V16 Vec); +int isequalV16(V16 Vec1, V16 Vec2); +void VecAddVecV16(V16 Vec1, V16 Vec2, V16 *Vec); + +uint16_t affineU16(Aff16 aff, uint16_t arr); +int xorU16(uint16_t n); +int HWU16(uint16_t n); +void printU16(uint16_t n); +void MatAddMatM16(M16 Mat1, M16 Mat2, M16 *Mat); +void MatMulVecM16(M16 Mat, V16 Vec, V16 *ans); +uint16_t MatMulNumM16(M16 Mat, uint16_t n); +void MatMulMatM16(M16 Mat1, M16 Mat2, M16 *Mat); +void MattransM16(M16 Mat, M16 *Mat_trans); + +void genMatpairM16(M16 *Mat, M16 *Mat_inv); +void genaffinepairM16(Aff16 *aff, Aff16 *aff_inv); +void affinemixM16(Aff16 aff, Aff16 preaff_inv, Aff16 *mixaff); + +/* +* 
32-bit Matrix operation +*/ + +void initM32(M32 *Mat); +void randM32(M32 *Mat); +void identityM32(M32 *Mat); +void printM32(M32 Mat); +void printbitM32(M32 Mat); +void copyM32(M32 Mat1, M32 *Mat2); +int isequalM32(M32 Mat1, M32 Mat2); +int isinvertM32(M32 Mat); +void invsM32(M32 Mat, M32 *Mat_inv); +int readbitM32(M32 Mat, int i, int j); +void flipbitM32(M32 *Mat, int i, int j); +void setbitM32(M32 *Mat, int i, int j, int bit); + +void initV32(V32 *Vec); +void randV32(V32 *Vec); +void printV32(V32 Vec); +int isequalV32(V32 Vec1, V32 Vec2); +void VecAddVecV32(V32 Vec1, V32 Vec2, V32 *Vec); + +uint32_t affineU32(Aff32 aff, uint32_t arr); +int xorU32(uint32_t n); +int HWU32(uint32_t n); +void printU32(uint32_t n); + +void MatMulVecM32(M32 Mat, V32 Vec, V32 *ans); +uint32_t MatMulNumM32(M32 Mat, uint32_t n); +void MatMulMatM32(M32 Mat1, M32 Mat2, M32 *Mat); +void MatAddMatM32(M32 Mat1, M32 Mat2, M32 *Mat); +void MattransM32(M32 Mat, M32 *Mat_trans); + +void genMatpairM32(M32 *Mat, M32 *Mat_inv); +void genaffinepairM32(Aff32 *aff, Aff32 *aff_inv); +void affinemixM32(Aff32 aff, Aff32 preaff_inv, Aff32 *mixaff); +void MatrixcomM8to32(M8 m1, M8 m2, M8 m3, M8 m4, M32 *mat); +void VectorcomV8to32(V8 v1, V8 v2, V8 v3, V8 v4, V32 *vec); +void affinecomM8to32(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff32 *aff); + +/* +* 64-bit Matrix operation +*/ + +void initM64(M64 *Mat); +void randM64(M64 *Mat); +void identityM64(M64 *Mat); +void printM64(M64 Mat); +void printbitM64(M64 Mat); +void copyM64(M64 Mat1, M64 *Mat2); +int isequalM64(M64 Mat1, M64 Mat2); +int isinvertM64(M64 Mat); +void invsM64(M64 Mat, M64 *Mat_inv); +int readbitM64(M64 Mat, int i, int j); +void flipbitM64(M64 *Mat, int i, int j); +void setbitM64(M64 *Mat, int i, int j, int bit); + +void initV64(V64 *Vec); +void randV64(V64 *Vec); +void printV64(V64 Vec); +int isequalV64(V64 Vec1, V64 Vec2); +void VecAddVecV64(V64 Vec1, V64 Vec2, V64 *Vec); + +uint64_t affineU64(Aff64 aff, uint64_t arr); +int xorU64(uint64_t 
n); +int HWU64(uint64_t n); +void printU64(uint64_t n); + +void MatMulVecM64(M64 Mat, V64 Vec, V64 *ans); +uint64_t MatMulNumM64(M64 Mat, uint64_t n); +void MatMulMatM64(M64 Mat1, M64 Mat2, M64 *Mat); +void MattransM64(M64 Mat, M64 *Mat_trans); + +void MatAddMatM64(M64 Mat1, M64 Mat2, M64 *Mat); +void genMatpairM64(M64 *Mat, M64 *Mat_inv); +void genaffinepairM64(Aff64 *aff, Aff64 *aff_inv); +void affinemixM64(Aff64 aff, Aff64 preaff_inv, Aff64 *mixaff); + +void MatrixcomM16to64(M16 m1, M16 m2, M16 m3, M16 m4, M64 *mat); +void VectorcomV16to64(V16 v1, V16 v2, V16 v3, V16 v4, V64 *vec); +void affinecomM16to64(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff64 *aff); +void MatrixcomM8to64(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M64 *mat); +void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V64 *vec); +void affinecomM8to64(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff64 *aff); + +/* +* 128-bit Matrix operation +*/ + +void initM128(M128 *Mat); +void randM128(M128 *Mat); +void identityM128(M128 *Mat); +void printM128(M128 Mat); +void printbitM128(M128 Mat); +void copyM128(M128 Mat1, M128 *Mat2); +int isequalM128(M128 Mat1, M128 Mat2); +int isinvertM128(M128 Mat); +void invsM128(M128 Mat, M128 *Mat_inv); +int readbitM128(M128 Mat, int i, int j); +void flipbitM128(M128 *Mat, int i, int j); +void setbitM128(M128 *Mat, int i, int j, int bit); + +void initV128(V128 *Vec); +void randV128(V128 *Vec); +void printV128(V128 Vec); + +void affineU128(Aff128 aff, uint64_t arr[], uint64_t ans[]); +int xorU128(uint64_t n[]); +int HWU128(uint64_t n[]); +void printU128(uint64_t n[]); +int isequalV128(V128 Vec1, V128 Vec2); +void VecAddVecV128(V128 Vec1, V128 Vec2, V128 *Vec); + +void MatMulVecM128(M128 Mat, V128 Vec, V128 *ans); +void MatMulMatM128(M128 Mat1, M128 Mat2, M128 *Mat); +void MattransM128(M128 Mat, M128 *Mat_trans); + +void MatAddMatM128(M128 Mat1, M128 Mat2, M128 *Mat); +void 
genMatpairM128(M128 *Mat, M128 *Mat_inv); +void genaffinepairM128(Aff128 *aff, Aff128 *aff_inv); +void affinemixM128(Aff128 aff, Aff128 preaff_inv, Aff128 *mixaff); + +void MatrixcomM32to128(M32 m1, M32 m2, M32 m3, M32 m4, M128 *mat); +void VectorcomV32to128(V32 v1, V32 v2, V32 v3, V32 v4, V128 *vec); +void affinecomM32to128(Aff32 aff1, Aff32 aff2, Aff32 aff3, Aff32 aff4, Aff128 *aff); +void MatrixcomM8to128(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M8 m9, M8 m10, M8 m11, M8 m12, M8 m13, M8 m14, M8 m15, M8 m16, M128 *mat); +void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V8 v9, V8 v10, V8 v11, V8 v12, V8 v13, V8 v14, V8 v15, V8 v16, V128 *vec); +void affinecomM8to128(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff8 aff9, Aff8 aff10, Aff8 aff11, Aff8 aff12, Aff8 aff13, Aff8 aff14, Aff8 aff15, Aff8 aff16, Aff128 *aff); +void MatrixcomM16to128(M16 m1, M16 m2, M16 m3, M16 m4, M16 m5, M16 m6, M16 m7, M16 m8, M128 *mat); +void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, V16 v8, V128 *vec); +void affinecomM16to128(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff16 aff5, Aff16 aff6, Aff16 aff7, Aff16 aff8, Aff128 *aff); + +/* +* 256-bit Matrix operation +*/ + +void initM256(M256 *Mat); +void randM256(M256 *Mat); +void identityM256(M256 *Mat); +void printM256(M256 Mat); +void printbitM256(M256 Mat); +void copyM256(M256 Mat1, M256 *Mat2); +int isequalM256(M256 Mat1, M256 Mat2); +int isinvertM256(M256 Mat); +void invsM256(M256 Mat, M256 *Mat_inv); +int readbitM256(M256 Mat, int i, int j); +void flipbitM256(M256 *Mat, int i, int j); +void setbitM256(M256 *Mat, int i, int j, int bit); + +void initV256(V256 *Vec); +void randV256(V256 *Vec); +void printV256(V256 Vec); + +void affineU256(Aff256 aff, uint64_t arr[], uint64_t ans[]); +int xorU256(uint64_t n[]); +int HWU256(uint64_t n[]); +void printU256(uint64_t n[]); +int isequalV256(V256 Vec1, V256 Vec2); +void 
VecAddVecV256(V256 Vec1, V256 Vec2, V256 *Vec); + +void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans); +void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat); +void MattransM256(M256 Mat, M256 *Mat_trans); + +void MatAddMatM256(M256 Mat1, M256 Mat2, M256 *Mat); +void genMatpairM256(M256 *Mat, M256 *Mat_inv); +void genaffinepairM256(Aff256 *aff, Aff256 *aff_inv); +void affinemixM256(Aff256 aff, Aff256 preaff_inv, Aff256 *mixaff); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/crypto/sm4/wb/WBRandom.h b/crypto/sm4/wb/WBRandom.h new file mode 100644 index 000000000..82f4398e5 --- /dev/null +++ b/crypto/sm4/wb/WBRandom.h @@ -0,0 +1,25 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF/WBMatrix. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#ifndef _WBRANDOM_H_ +#define _WBRANDOM_H_ + +#include "openssl/e_os2.h" +#include "openssl/rand.h" + +static ossl_inline unsigned int cus_random() +{ + unsigned int ret; + RAND_bytes((unsigned char *)&ret, sizeof(ret)); + return ret; +} + +#endif // _WBRANDOM_H_ \ No newline at end of file diff --git a/crypto/sm4/wb/WSISE-wbsm4.c b/crypto/sm4/wb/WSISE-wbsm4.c new file mode 100644 index 000000000..fe0860db7 --- /dev/null +++ b/crypto/sm4/wb/WSISE-wbsm4.c @@ -0,0 +1,536 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF/WSISE-White-box-SM4. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include +#include +#include "crypto/wbsm4.h" +#include "WBMatrix.h" + +#define GET32(pc) ( \ + ((uint32_t)(pc)[0] << 24) ^ \ + ((uint32_t)(pc)[1] << 16) ^ \ + ((uint32_t)(pc)[2] << 8) ^ \ + ((uint32_t)(pc)[3])) + +#define PUT32(st, ct) \ + (ct)[0] = (uint8_t)((st) >> 24); \ + (ct)[1] = (uint8_t)((st) >> 16); \ + (ct)[2] = (uint8_t)((st) >> 8); \ + (ct)[3] = (uint8_t)(st) + +static uint8_t SBOX[256]={ + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, + 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 
0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, +}; + +static M64 L_matrix = { + .M[0] = 0xA000200020008000, + .M[1] = 0x5000100010004000, + .M[2] = 0x2800080008002000, + .M[3] = 0x1400040004001000, + .M[4] = 0x0A00020002000800, + .M[5] = 0x0500010001000400, + .M[6] = 0x0200800080008200, + .M[7] = 0x0100400040004100, + + .M[8] = 0x00A0002000200080, + .M[9] = 0x0050001000100040, + .M[10] = 0x0028000800080020, + .M[11] = 0x0014000400040010, + .M[12] = 0x000A000200020008, + .M[13] = 0x0005000100010004, + .M[14] = 0x0002008000800082, + .M[15] = 0x0001004000400041, + + .M[16] = 0x8000A00020002000, + .M[17] = 0x4000500010001000, + .M[18] = 0x2000280008000800, + .M[19] = 0x1000140004000400, + .M[20] = 0x08000A0002000200, + .M[21] = 0x0400050001000100, + .M[22] = 0x8200020080008000, + .M[23] = 0x4100010040004000, + + .M[24] = 0x008000A000200020, + .M[25] = 0x0040005000100010, + .M[26] = 0x0020002800080008, + .M[27] = 0x0010001400040004, + .M[28] = 0x0008000A00020002, + .M[29] = 0x0004000500010001, + .M[30] = 0x0082000200800080, + .M[31] = 0x0041000100400040, + + .M[32] = 0x20008000A0002000, + .M[33] = 0x1000400050001000, + .M[34] = 0x0800200028000800, + .M[35] = 0x0400100014000400, + .M[36] = 0x020008000A000200, + .M[37] = 0x0100040005000100, + .M[38] = 0x8000820002008000, + .M[39] = 0x4000410001004000, + + .M[40] = 0x0020008000A00020, + .M[41] = 0x0010004000500010, + .M[42] = 0x0008002000280008, + .M[43] = 0x0004001000140004, + .M[44] = 0x00020008000A0002, + .M[45] = 0x0001000400050001, + .M[46] = 0x0080008200020080, + .M[47] = 0x0040004100010040, + + .M[48] = 0x200020008000A000, + .M[49] = 0x1000100040005000, + .M[50] = 0x0800080020002800, + .M[51] = 0x0400040010001400, + .M[52] = 0x0200020008000A00, + .M[53] = 0x0100010004000500, + .M[54] = 0x8000800082000200, + .M[55] = 0x4000400041000100, + + .M[56] = 0x00200020008000A0, + .M[57] = 
0x0010001000400050, + .M[58] = 0x0008000800200028, + .M[59] = 0x0004000400100014, + .M[60] = 0x000200020008000A, + .M[61] = 0x0001000100040005, + .M[62] = 0x0080008000820002, + .M[63] = 0x0040004000410001 +}; + +static M64 SR0 = { + .M[0] = 0x8000000000000000, + .M[1] = 0x4000000000000000, + .M[2] = 0x2000000000000000, + .M[3] = 0x1000000000000000, + .M[4] = 0x0800000000000000, + .M[5] = 0x0400000000000000, + .M[6] = 0x0200000000000000, + .M[7] = 0x0100000000000000, + + .M[8] = 0x0000800000000000, + .M[9] = 0x0000400000000000, + .M[10] = 0x0000200000000000, + .M[11] = 0x0000100000000000, + .M[12] = 0x0000080000000000, + .M[13] = 0x0000040000000000, + .M[14] = 0x0000020000000000, + .M[15] = 0x0000010000000000, + + .M[16] = 0x0000000080000000, + .M[17] = 0x0000000040000000, + .M[18] = 0x0000000020000000, + .M[19] = 0x0000000010000000, + .M[20] = 0x0000000008000000, + .M[21] = 0x0000000004000000, + .M[22] = 0x0000000002000000, + .M[23] = 0x0000000001000000, + + .M[24] = 0x0000000000008000, + .M[25] = 0x0000000000004000, + .M[26] = 0x0000000000002000, + .M[27] = 0x0000000000001000, + .M[28] = 0x0000000000000800, + .M[29] = 0x0000000000000400, + .M[30] = 0x0000000000000200, + .M[31] = 0x0000000000000100, + + .M[32] = 0x0080000000000000, + .M[33] = 0x0040000000000000, + .M[34] = 0x0020000000000000, + .M[35] = 0x0010000000000000, + .M[36] = 0x0008000000000000, + .M[37] = 0x0004000000000000, + .M[38] = 0x0002000000000000, + .M[39] = 0x0001000000000000, + + .M[40] = 0x0000008000000000, + .M[41] = 0x0000004000000000, + .M[42] = 0x0000002000000000, + .M[43] = 0x0000001000000000, + .M[44] = 0x0000000800000000, + .M[45] = 0x0000000400000000, + .M[46] = 0x0000000200000000, + .M[47] = 0x0000000100000000, + + .M[48] = 0x0000000000800000, + .M[49] = 0x0000000000400000, + .M[50] = 0x0000000000200000, + .M[51] = 0x0000000000100000, + .M[52] = 0x0000000000080000, + .M[53] = 0x0000000000040000, + .M[54] = 0x0000000000020000, + .M[55] = 0x0000000000010000, + + .M[56] = 
0x0000000000000080, + .M[57] = 0x0000000000000040, + .M[58] = 0x0000000000000020, + .M[59] = 0x0000000000000010, + .M[60] = 0x0000000000000008, + .M[61] = 0x0000000000000004, + .M[62] = 0x0000000000000002, + .M[63] = 0x0000000000000001 +}; + +static M64 SR1 = { + .M[0] = 0x0080000000000000, + .M[1] = 0x0040000000000000, + .M[2] = 0x0020000000000000, + .M[3] = 0x0010000000000000, + .M[4] = 0x0008000000000000, + .M[5] = 0x0004000000000000, + .M[6] = 0x0002000000000000, + .M[7] = 0x0001000000000000, + + .M[8] = 0x0000008000000000, + .M[9] = 0x0000004000000000, + .M[10] = 0x0000002000000000, + .M[11] = 0x0000001000000000, + .M[12] = 0x0000000800000000, + .M[13] = 0x0000000400000000, + .M[14] = 0x0000000200000000, + .M[15] = 0x0000000100000000, + + .M[16] = 0x0000000000800000, + .M[17] = 0x0000000000400000, + .M[18] = 0x0000000000200000, + .M[19] = 0x0000000000100000, + .M[20] = 0x0000000000080000, + .M[21] = 0x0000000000040000, + .M[22] = 0x0000000000020000, + .M[23] = 0x0000000000010000, + + .M[24] = 0x0000000000000080, + .M[25] = 0x0000000000000040, + .M[26] = 0x0000000000000020, + .M[27] = 0x0000000000000010, + .M[28] = 0x0000000000000008, + .M[29] = 0x0000000000000004, + .M[30] = 0x0000000000000002, + .M[31] = 0x0000000000000001, + + .M[32] = 0x8000000000000000, + .M[33] = 0x4000000000000000, + .M[34] = 0x2000000000000000, + .M[35] = 0x1000000000000000, + .M[36] = 0x0800000000000000, + .M[37] = 0x0400000000000000, + .M[38] = 0x0200000000000000, + .M[39] = 0x0100000000000000, + + .M[40] = 0x0000800000000000, + .M[41] = 0x0000400000000000, + .M[42] = 0x0000200000000000, + .M[43] = 0x0000100000000000, + .M[44] = 0x0000080000000000, + .M[45] = 0x0000040000000000, + .M[46] = 0x0000020000000000, + .M[47] = 0x0000010000000000, + + .M[48] = 0x0000000080000000, + .M[49] = 0x0000000040000000, + .M[50] = 0x0000000020000000, + .M[51] = 0x0000000010000000, + .M[52] = 0x0000000008000000, + .M[53] = 0x0000000004000000, + .M[54] = 0x0000000002000000, + .M[55] = 0x0000000001000000, 
+ + .M[56] = 0x0000000000008000, + .M[57] = 0x0000000000004000, + .M[58] = 0x0000000000002000, + .M[59] = 0x0000000000001000, + .M[60] = 0x0000000000000800, + .M[61] = 0x0000000000000400, + .M[62] = 0x0000000000000200, + .M[63] = 0x0000000000000100 +}; + +void wbsm4_wsise_set_key(const uint8_t *key, wbsm4_wsise_key *wbsm4_key) +{ + DECLARE_IS_ENDIAN; + + *wbsm4_key = *(wbsm4_wsise_key *)key; + if (IS_LITTLE_ENDIAN) + return; + + uint8_t *p = (uint8_t *)wbsm4_key; + uint8_t *table = (uint8_t *)&wbsm4_key->Table; + uint8_t *end = p + sizeof(wbsm4_wsise_key); + while (p < table) + { + uint8_t t; + t = p[0]; + p[0] = p[3]; + p[3] = t; + + t = p[1]; + p[1] = p[2]; + p[2] = t; + + p += 4; + } + + p = table; + while (p < end) + { + uint8_t t; + t = p[0]; + p[0] = p[7]; + p[7] = t; + + t = p[1]; + p[1] = p[6]; + p[6] = t; + + t = p[2]; + p[2] = p[5]; + p[5] = t; + + t = p[3]; + p[3] = p[4]; + p[4] = t; + + p += 8; + } +} + +void wbsm4_wsise_export_key(const wbsm4_wsise_key *wbsm4_key, uint8_t *key) +{ + DECLARE_IS_ENDIAN; + + wbsm4_wsise_key *out = (wbsm4_wsise_key *)key; + *out = *wbsm4_key; + if (IS_LITTLE_ENDIAN) + return; + + uint8_t *p = (uint8_t *)out; + uint8_t *table = (uint8_t *)&out->Table; + uint8_t *end = p + sizeof(wbsm4_wsise_key); + while (p < table) + { + uint8_t t; + t = p[0]; + p[0] = p[3]; + p[3] = t; + + t = p[1]; + p[1] = p[2]; + p[2] = t; + + p += 4; + } + + p = table; + while (p < end) + { + uint8_t t; + t = p[0]; + p[0] = p[7]; + p[7] = t; + + t = p[1]; + p[1] = p[6]; + p[6] = t; + + t = p[2]; + p[2] = p[5]; + p[5] = t; + + t = p[3]; + p[3] = p[4]; + p[4] = t; + + p += 8; + } +} + +void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key) +{ + int i, j, x; + Aff32 P[36]; + Aff32 P_inv[36]; + Aff8 Eij[32][4]; + Aff8 Eij_inv[32][4]; + Aff32 Ei_inv[32]; + Aff8 Qij[32][4]; + Aff8 Qij_inv[32][4]; + Aff64 Q[32]; + Aff32 Q_inv[32]; + uint8_t temp_u8; + uint16_t temp_u16; + uint32_t temp_u32; + uint64_t temp_u64; + + uint32_t SK[32]; + 
wbsm4_sm4_setkey(SK, sm4_key); + + for (i = 0; i < 36; i++) + { + // affine P + genaffinepairM32(&P[i], &P_inv[i]); + } + + for (i = 0; i < 32; i++) + { + // affine E + for (j = 0; j < 4; j++) + { + genaffinepairM8(&Eij[i][j], &Eij_inv[i][j]); + genaffinepairM8(&Qij[i][j], &Qij_inv[i][j]); + } + + // combine 4 E8 to 1 E32 + affinecomM8to32(Eij_inv[i][0], Eij_inv[i][1], Eij_inv[i][2], Eij_inv[i][3], &Ei_inv[i]); + + // affine M + affinemixM32(Ei_inv[i], P_inv[i + 1], &wbsm4_key->M[i][0]); + affinemixM32(Ei_inv[i], P_inv[i + 2], &wbsm4_key->M[i][1]); + affinemixM32(Ei_inv[i], P_inv[i + 3], &wbsm4_key->M[i][2]); + + // affine Q + affinecomM8to64(Qij[i][0], Qij[i][1], Qij[i][2], Qij[i][3], Qij[i][0], Qij[i][1], Qij[i][2], Qij[i][3], &Q[i]); + affinecomM8to32(Qij_inv[i][0], Qij_inv[i][1], Qij_inv[i][2], Qij_inv[i][3], &Q_inv[i]); + + // affine C D, C for Xi0, D for T(Xi1+Xi2+Xi3+rk) + affinemixM32(P[i + 4], P_inv[i], &wbsm4_key->C[i]); + affinemixM32(P[i + 4], Q_inv[i], &wbsm4_key->D[i]); + temp_u32 = cus_random(); + wbsm4_key->C[i].Vec.V ^= temp_u32; + wbsm4_key->D[i].Vec.V ^= P[i + 4].Vec.V ^ temp_u32; + } + + for (i = 0; i < 32; i++) + { + V64 Q_constant[3]; + for (j = 0; j < 3; j++) + { + randV64(&Q_constant[j]); + } + + uint8_t randnum[4]; + V8 randvec[4]; + for (j = 0; j < 4; j++) + { + randnum[j] = cus_random() % 2; + randV8(&randvec[j]); + } + + for (x = 0; x < 256; x++) + { + for (j = 0; j < 4; j++) + { + temp_u8 = affineU8(Eij[i][j], x); + if (randnum[j] == 0) + temp_u16 = (SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)] << 8) | SBOX[temp_u8 ^ randvec[j].V]; + else + temp_u16 = (SBOX[temp_u8 ^ randvec[j].V] << 8) | SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)]; + temp_u64 = ((uint64_t)temp_u16) << (48 - 16 * j); + temp_u64 = MatMulNumM64(L_matrix, temp_u64); + if (randnum[j] == 0) + temp_u64 = MatMulNumM64(SR0, temp_u64); + else + temp_u64 = MatMulNumM64(SR1, temp_u64); + wbsm4_key->Table[i][j][x] = MatMulNumM64(Q[i].Mat, temp_u64); + } + for (j = 0; j < 
3; j++) + { + wbsm4_key->Table[i][j][x] ^= Q_constant[j].V; + } + wbsm4_key->Table[i][3][x] ^= Q[i].Vec.V ^ Q_constant[0].V ^ Q_constant[1].V ^ Q_constant[2].V; + } + } + + // external encoding + for (i = 0; i < 4; i++) + { + wbsm4_key->SE[i].Mat = P[i].Mat; + wbsm4_key->SE[i].Vec = P[i].Vec; + + wbsm4_key->FE[i].Mat = P_inv[35 - i].Mat; + wbsm4_key->FE[i].Vec = P_inv[35 - i].Vec; + } +} + +void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_wsise_key *wbsm4_key) +{ + int i; + uint32_t x0, x1, x2, x3, x4; + uint64_t xx4; + uint32_t xt0, xt1, xt2, xt3, xt4; + + x0 = GET32(IN); + x1 = GET32(IN + 4); + x2 = GET32(IN + 8); + x3 = GET32(IN + 12); + + x0 = affineU32(wbsm4_key->SE[0], x0); + x1 = affineU32(wbsm4_key->SE[1], x1); + x2 = affineU32(wbsm4_key->SE[2], x2); + x3 = affineU32(wbsm4_key->SE[3], x3); + + for (i = 0; i < 32; i++) + { + xt1 = affineU32(wbsm4_key->M[i][0], x1); + xt2 = affineU32(wbsm4_key->M[i][1], x2); + xt3 = affineU32(wbsm4_key->M[i][2], x3); + x4 = xt1 ^ xt2 ^ xt3; + xx4 = wbsm4_key->Table[i][0][(x4 >> 24) & 0xff] ^ wbsm4_key->Table[i][1][(x4 >> 16) & 0xff] ^ wbsm4_key->Table[i][2][(x4 >> 8) & 0xff] ^ wbsm4_key->Table[i][3][x4 & 0xff]; + x4 = xx4 >> 32; + xt0 = affineU32(wbsm4_key->C[i], x0); + xt4 = affineU32(wbsm4_key->D[i], x4); + x4 = xt0 ^ xt4; + + x0 = x1; + x1 = x2; + x2 = x3; + x3 = x4; + } + + x0 = affineU32(wbsm4_key->FE[3], x0); + x1 = affineU32(wbsm4_key->FE[2], x1); + x2 = affineU32(wbsm4_key->FE[1], x2); + x3 = affineU32(wbsm4_key->FE[0], x3); + + PUT32(x3, OUT); + PUT32(x2, OUT + 4); + PUT32(x1, OUT + 8); + PUT32(x0, OUT + 12); +} \ No newline at end of file diff --git a/crypto/sm4/wb/Xiao-Lai-wbsm4.c b/crypto/sm4/wb/Xiao-Lai-wbsm4.c new file mode 100644 index 000000000..bea121d8f --- /dev/null +++ b/crypto/sm4/wb/Xiao-Lai-wbsm4.c @@ -0,0 +1,276 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. 
+ * Ported from Nexus-TYF/Xiao-Lai-White-box-SM4. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include +#include +#include "crypto/wbsm4.h" +#include "WBMatrix.h" + +#define GET32(pc) ( \ + ((uint32_t)(pc)[0] << 24) ^ \ + ((uint32_t)(pc)[1] << 16) ^ \ + ((uint32_t)(pc)[2] << 8) ^ \ + ((uint32_t)(pc)[3])) + +#define PUT32(st, ct) \ + (ct)[0] = (uint8_t)((st) >> 24); \ + (ct)[1] = (uint8_t)((st) >> 16); \ + (ct)[2] = (uint8_t)((st) >> 8); \ + (ct)[3] = (uint8_t)(st) + +static uint8_t SBOX[256]={ + 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, + 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, + 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, + 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, + 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, + 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, + 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, + 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, + 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, + 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, + 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, + 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, + 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, + 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, + 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, + 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, + 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, + 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, + 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, + 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, + 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, + 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, + 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, + 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, + 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, + 
0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, + 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, + 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, + 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, + 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, + 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, + 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48, +}; + +static M32 L_matrix = { + .M[0] = 0xA0202080, + .M[1] = 0x50101040, + .M[2] = 0x28080820, + .M[3] = 0x14040410, + .M[4] = 0xA020208, + .M[5] = 0x5010104, + .M[6] = 0x2808082, + .M[7] = 0x1404041, + .M[8] = 0x80A02020, + .M[9] = 0x40501010, + .M[10] = 0x20280808, + .M[11] = 0x10140404, + .M[12] = 0x80A0202, + .M[13] = 0x4050101, + .M[14] = 0x82028080, + .M[15] = 0x41014040, + .M[16] = 0x2080A020, + .M[17] = 0x10405010, + .M[18] = 0x8202808, + .M[19] = 0x4101404, + .M[20] = 0x2080A02, + .M[21] = 0x1040501, + .M[22] = 0x80820280, + .M[23] = 0x40410140, + .M[24] = 0x202080A0, + .M[25] = 0x10104050, + .M[26] = 0x8082028, + .M[27] = 0x4041014, + .M[28] = 0x202080A, + .M[29] = 0x1010405, + .M[30] = 0x80808202, + .M[31] = 0x40404101}; + +void wbsm4_xiaolai_set_key(const uint8_t *key, wbsm4_xiaolai_key *wbsm4_key) +{ + DECLARE_IS_ENDIAN; + + *wbsm4_key = *(wbsm4_xiaolai_key *)key; + if (IS_LITTLE_ENDIAN) + return; + + uint8_t *p = (uint8_t *)wbsm4_key; + uint8_t *end = p + sizeof(wbsm4_xiaolai_key); + while (p < end) + { + uint8_t t; + t = p[0]; + p[0] = p[3]; + p[3] = t; + + t = p[1]; + p[1] = p[2]; + p[2] = t; + + p += 4; + } +} + +void wbsm4_xiaolai_export_key(const wbsm4_xiaolai_key *wbsm4_key, uint8_t *key) +{ + DECLARE_IS_ENDIAN; + + wbsm4_xiaolai_key *out = (wbsm4_xiaolai_key *)key; + *out = *wbsm4_key; + if (IS_LITTLE_ENDIAN) + return; + + uint8_t *p = (uint8_t *)out; + uint8_t *end = p + sizeof(wbsm4_xiaolai_key); + while (p < end) + { + uint8_t t; + t = p[0]; + p[0] = p[3]; + p[3] = t; + + t = p[1]; + p[1] = p[2]; + p[2] = t; + + p += 4; + } +} + +void wbsm4_xiaolai_gen(const uint8_t *sm4_key, wbsm4_xiaolai_key 
*wbsm4_key) +{ + int i, j, x; + Aff32 P[36]; + Aff32 P_inv[36]; + Aff8 Eij[32][4]; + Aff8 Eij_inv[32][4]; + Aff32 Ei_inv[32]; + Aff32 Q[32]; + Aff32 Q_inv[32]; + + uint32_t SK[32]; + wbsm4_sm4_setkey(SK, sm4_key); + + for (i = 0; i < 36; i++) + { + // affine P + genaffinepairM32(&P[i], &P_inv[i]); + } + + for (i = 0; i < 32; i++) + { + // affine E + for (j = 0; j < 4; j++) + { + genaffinepairM8(&Eij[i][j], &Eij_inv[i][j]); + } + + // combine 4 E8 to 1 E32 + affinecomM8to32(Eij_inv[i][0], Eij_inv[i][1], Eij_inv[i][2], Eij_inv[i][3], &Ei_inv[i]); + + // affine M + affinemixM32(Ei_inv[i], P_inv[i + 1], &wbsm4_key->M[i][0]); + affinemixM32(Ei_inv[i], P_inv[i + 2], &wbsm4_key->M[i][1]); + affinemixM32(Ei_inv[i], P_inv[i + 3], &wbsm4_key->M[i][2]); + + // affine Q + genaffinepairM32(&Q[i], &Q_inv[i]); + + // affine C D, C for Xi0, D for T(Xi1+Xi2+Xi3+rk) + affinemixM32(P[i + 4], P_inv[i], &wbsm4_key->C[i]); + affinemixM32(P[i + 4], Q_inv[i], &wbsm4_key->D[i]); + uint32_t temp_u32 = cus_random(); + wbsm4_key->C[i].Vec.V ^= temp_u32; + wbsm4_key->D[i].Vec.V ^= P[i + 4].Vec.V ^ temp_u32; + } + + for (i = 0; i < 32; i++) + { + // combine QL + M32 QL; + MatMulMatM32(Q[i].Mat, L_matrix, &QL); + + uint32_t Q_constant[3] = {0}; + for (j = 0; j < 3; j++) + { + Q_constant[j] = cus_random(); + } + + for (x = 0; x < 256; x++) + { + for (j = 0; j < 4; j++) + { + uint8_t temp_u8 = affineU8(Eij[i][j], x); + temp_u8 = SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)]; + uint32_t temp_32 = (uint32_t)temp_u8 << (24 - j * 8); /* cast first: shifting the int-promoted byte by 24 would shift into the sign bit (UB); matches the casts used in wbsm4_sm4_setkey and the WSISE table build */ + wbsm4_key->Table[i][j][x] = MatMulNumM32(QL, temp_32); + } + for (j = 0; j < 3; j++) + { + wbsm4_key->Table[i][j][x] ^= Q_constant[j]; + } + wbsm4_key->Table[i][3][x] ^= Q[i].Vec.V ^ Q_constant[0] ^ Q_constant[1] ^ Q_constant[2]; + } + } + + // external encoding + for (i = 0; i < 4; i++) + { + wbsm4_key->SE[i].Mat = P[i].Mat; + wbsm4_key->SE[i].Vec = P[i].Vec; + + wbsm4_key->FE[i].Mat = P_inv[35 - i].Mat; + wbsm4_key->FE[i].Vec = P_inv[35 - i].Vec; + } +} + +void 
wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_xiaolai_key *wbsm4_key) +{ + int i; + uint32_t x0, x1, x2, x3, x4; + uint32_t xt0, xt1, xt2, xt3, xt4; + + x0 = GET32(IN); + x1 = GET32(IN + 4); + x2 = GET32(IN + 8); + x3 = GET32(IN + 12); + x0 = affineU32(wbsm4_key->SE[0], x0); + x1 = affineU32(wbsm4_key->SE[1], x1); + x2 = affineU32(wbsm4_key->SE[2], x2); + x3 = affineU32(wbsm4_key->SE[3], x3); + + for (i = 0; i < 32; i++) + { + xt1 = affineU32(wbsm4_key->M[i][0], x1); + xt2 = affineU32(wbsm4_key->M[i][1], x2); + xt3 = affineU32(wbsm4_key->M[i][2], x3); + x4 = xt1 ^ xt2 ^ xt3; + x4 = wbsm4_key->Table[i][0][(x4 >> 24) & 0xff] ^ wbsm4_key->Table[i][1][(x4 >> 16) & 0xff] ^ wbsm4_key->Table[i][2][(x4 >> 8) & 0xff] ^ wbsm4_key->Table[i][3][x4 & 0xff]; + xt0 = affineU32(wbsm4_key->C[i], x0); + xt4 = affineU32(wbsm4_key->D[i], x4); + x4 = xt0 ^ xt4; + + x0 = x1; + x1 = x2; + x2 = x3; + x3 = x4; + } + + x0 = affineU32(wbsm4_key->FE[3], x0); + x1 = affineU32(wbsm4_key->FE[2], x1); + x2 = affineU32(wbsm4_key->FE[1], x2); + x3 = affineU32(wbsm4_key->FE[0], x3); + PUT32(x3, OUT); + PUT32(x2, OUT + 4); + PUT32(x1, OUT + 8); + PUT32(x0, OUT + 12); +} \ No newline at end of file diff --git a/crypto/sm4/wb/wbsm4.c b/crypto/sm4/wb/wbsm4.c new file mode 100644 index 000000000..7d0a6aa3c --- /dev/null +++ b/crypto/sm4/wb/wbsm4.c @@ -0,0 +1,92 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include +#include "crypto/wbsm4.h" + +#define GET32(pc) ( \ + ((uint32_t)(pc)[0] << 24) ^ \ + ((uint32_t)(pc)[1] << 16) ^ \ + ((uint32_t)(pc)[2] << 8) ^ \ + ((uint32_t)(pc)[3])) + +#define PUT32(st, ct) \ + (ct)[0] = (uint8_t)((st) >> 24); \ + (ct)[1] = (uint8_t)((st) >> 16); \ + (ct)[2] = (uint8_t)((st) >> 8); \ + (ct)[3] = (uint8_t)(st) + +static const uint8_t SM4_S[256] = { + 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, + 0x28, 0xFB, 0x2C, 0x05, 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, + 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, 0x9C, 0x42, 0x50, 0xF4, + 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62, + 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, + 0x75, 0x8F, 0x3F, 0xA6, 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, + 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8, 0x68, 0x6B, 0x81, 0xB2, + 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35, + 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, + 0x01, 0x21, 0x78, 0x87, 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, + 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E, 0xEA, 0xBF, 0x8A, 0xD2, + 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1, + 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, + 0xF5, 0x8C, 0xB1, 0xE3, 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, + 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F, 0xD5, 0xDB, 0x37, 0x45, + 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51, + 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, + 0x1F, 0x10, 0x5A, 0xD8, 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, + 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, 0x89, 0x69, 0x97, 0x4A, + 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 
0xC5, 0x6E, 0xC6, 0x84, + 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, + 0xD7, 0xCB, 0x39, 0x48}; + +static const uint32_t FK[4] = { + 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc}; + +static const uint32_t CK[32] = { + 0x00070E15, 0x1C232A31, 0x383F464D, 0x545B6269, + 0x70777E85, 0x8C939AA1, 0xA8AFB6BD, 0xC4CBD2D9, + 0xE0E7EEF5, 0xFC030A11, 0x181F262D, 0x343B4249, + 0x50575E65, 0x6C737A81, 0x888F969D, 0xA4ABB2B9, + 0xC0C7CED5, 0xDCE3EAF1, 0xF8FF060D, 0x141B2229, + 0x30373E45, 0x4C535A61, 0x686F767D, 0x848B9299, + 0xA0A7AEB5, 0xBCC3CAD1, 0xD8DFE6ED, 0xF4FB0209, + 0x10171E25, 0x2C333A41, 0x484F565D, 0x646B7279}; + +static ossl_inline uint32_t rotl(uint32_t a, uint8_t n) +{ + return (a << n) | (a >> (32 - n)); +} + +void wbsm4_sm4_setkey(uint32_t SK[32], const uint8_t key[16]) +{ + uint32_t K[36]; + + K[0] = GET32(key) ^ FK[0]; + K[1] = GET32(key + 4) ^ FK[1]; + K[2] = GET32(key + 8) ^ FK[2]; + K[3] = GET32(key + 12) ^ FK[3]; + + for (uint32_t i = 0; i != 32; ++i) + { + uint32_t X = K[(i + 1)] ^ K[(i + 2)] ^ K[(i + 3)] ^ CK[i]; + uint32_t t = 0; + + t |= ((uint32_t)SM4_S[(uint8_t)(X >> 24)]) << 24; + t |= ((uint32_t)SM4_S[(uint8_t)(X >> 16)]) << 16; + t |= ((uint32_t)SM4_S[(uint8_t)(X >> 8)]) << 8; + t |= SM4_S[(uint8_t)X]; + t = t ^ rotl(t, 13) ^ rotl(t, 23); + + K[i + 4] = K[i] ^ t; + SK[i] = K[i + 4]; + } +} \ No newline at end of file diff --git a/include/crypto/wbsm4.h b/include/crypto/wbsm4.h new file mode 100644 index 000000000..8e2246a50 --- /dev/null +++ b/include/crypto/wbsm4.h @@ -0,0 +1,74 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#ifndef _WBSM4_H_ +#define _WBSM4_H_ + +#include + +# ifdef OPENSSL_NO_WBSM4 +# error WBSM4 is disabled. +# endif + +#include "wbstructure.h" + +void wbsm4_sm4_setkey(uint32_t SK[32], const uint8_t key[16]); + +#pragma pack(push, 1) +typedef struct +{ + Aff32 M[32][3]; + Aff32 C[32]; + Aff32 D[32]; + Aff32 SE[4]; + Aff32 FE[4]; + uint32_t Table[32][4][256]; +} wbsm4_xiaolai_key; +#pragma pack(pop) + +void wbsm4_xiaolai_gen(const uint8_t *sm4_key, wbsm4_xiaolai_key *wbsm4_key); +void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_xiaolai_key *wbsm4_key); +void wbsm4_xiaolai_set_key(const uint8_t *key, wbsm4_xiaolai_key *wbsm4_key); +void wbsm4_xiaolai_export_key(const wbsm4_xiaolai_key *wbsm4_key, uint8_t *key); + +#pragma pack(push, 1) +typedef struct +{ + Aff32 SE[4]; + Aff32 FE[4]; + uint32_t TD[32][4][4][256]; + uint32_t TR[32][4][256][256]; +} wbsm4_baiwu_key; +#pragma pack(pop) + +void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key); +void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_baiwu_key *wbsm4_key); +void wbsm4_baiwu_set_key(const uint8_t *key, wbsm4_baiwu_key *wbsm4_key); +void wbsm4_baiwu_export_key(const wbsm4_baiwu_key *wbsm4_key, uint8_t *key); + +#pragma pack(push, 1) +typedef struct +{ + Aff32 M[32][3]; + Aff32 C[32]; + Aff32 D[32]; + Aff32 SE[4]; + Aff32 FE[4]; + uint64_t Table[32][4][256]; +} wbsm4_wsise_key; +#pragma pack(pop) + +void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key); +void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_wsise_key *wbsm4_key); +void wbsm4_wsise_set_key(const uint8_t *key, wbsm4_wsise_key *wbsm4_key); +void wbsm4_wsise_export_key(const wbsm4_wsise_key *wbsm4_key, uint8_t *key); + +#endif // _WBSM4_H_ \ No newline at end 
of file diff --git a/include/crypto/wbstructure.h b/include/crypto/wbstructure.h new file mode 100644 index 000000000..3aad54091 --- /dev/null +++ b/include/crypto/wbstructure.h @@ -0,0 +1,136 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2024 Nexus-TYF. All Rights Reserved. + * Ported from Nexus-TYF/WBMatrix. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#ifndef _WBSTRUCTURE_H_ +#define _WBSTRUCTURE_H_ + +#include + +// 4 bits +typedef struct M4 +{ + uint8_t M[4]; +} M4; + +typedef struct V4 +{ + uint8_t V; +} V4; + +typedef struct Aff4 +{ + M4 Mat; + V4 Vec; +} Aff4; + +// 8 bits +typedef struct M8 +{ + uint8_t M[8]; +} M8; + +typedef struct V8 +{ + uint8_t V; +} V8; + +typedef struct Aff8 +{ + M8 Mat; + V8 Vec; +} Aff8; + +// 16 bits +typedef struct M16 +{ + uint16_t M[16]; +} M16; + +typedef struct V16 +{ + uint16_t V; +} V16; + +typedef struct Aff16 +{ + M16 Mat; + V16 Vec; +} Aff16; + +// 32 bits +typedef struct M32 +{ + uint32_t M[32]; +} M32; + +typedef struct V32 +{ + uint32_t V; +} V32; + +typedef struct Aff32 +{ + M32 Mat; + V32 Vec; +} Aff32; + +// 64 bits +typedef struct M64 +{ + uint64_t M[64]; +} M64; + +typedef struct V64 +{ + uint64_t V; +} V64; + +typedef struct Aff64 +{ + M64 Mat; + V64 Vec; +} Aff64; + +// 128 bits +typedef struct M128 +{ + uint64_t M[128][2]; +} M128; + +typedef struct V128 +{ + uint64_t V[2]; +} V128; + +typedef struct Aff128 +{ + M128 Mat; + V128 Vec; +} Aff128; + +// 256 bits +typedef struct M256 +{ + uint64_t M[256][4]; +} M256; + +typedef struct V256 +{ + uint64_t V[4]; +} V256; + +typedef struct Aff256 +{ + M256 Mat; + V256 Vec; +} Aff256; + +#endif \ No newline at end of file diff --git a/include/openssl/evp.h b/include/openssl/evp.h 
index ec26e09af..5f8cd2ecc 100644 --- a/include/openssl/evp.h +++ b/include/openssl/evp.h @@ -1035,6 +1035,35 @@ const EVP_CIPHER *EVP_sm4_gcm(void); const EVP_CIPHER *EVP_sm4_ccm(void); # endif +# ifndef OPENSSL_NO_WBSM4 +const EVP_CIPHER *EVP_wbsm4_xiaolai_ecb(void); +const EVP_CIPHER *EVP_wbsm4_xiaolai_cbc(void); +const EVP_CIPHER *EVP_wbsm4_xiaolai_cfb128(void); +# define EVP_wbsm4_xiaolai_cfb EVP_wbsm4_xiaolai_cfb128 +const EVP_CIPHER *EVP_wbsm4_xiaolai_ofb(void); +const EVP_CIPHER *EVP_wbsm4_xiaolai_ctr(void); +const EVP_CIPHER *EVP_wbsm4_xiaolai_gcm(void); +const EVP_CIPHER *EVP_wbsm4_xiaolai_ccm(void); + +const EVP_CIPHER *EVP_wbsm4_baiwu_ecb(void); +const EVP_CIPHER *EVP_wbsm4_baiwu_cbc(void); +const EVP_CIPHER *EVP_wbsm4_baiwu_cfb128(void); +# define EVP_wbsm4_baiwu_cfb EVP_wbsm4_baiwu_cfb128 +const EVP_CIPHER *EVP_wbsm4_baiwu_ofb(void); +const EVP_CIPHER *EVP_wbsm4_baiwu_ctr(void); +const EVP_CIPHER *EVP_wbsm4_baiwu_gcm(void); +const EVP_CIPHER *EVP_wbsm4_baiwu_ccm(void); + +const EVP_CIPHER *EVP_wbsm4_wsise_ecb(void); +const EVP_CIPHER *EVP_wbsm4_wsise_cbc(void); +const EVP_CIPHER *EVP_wbsm4_wsise_cfb128(void); +# define EVP_wbsm4_wsise_cfb EVP_wbsm4_wsise_cfb128 +const EVP_CIPHER *EVP_wbsm4_wsise_ofb(void); +const EVP_CIPHER *EVP_wbsm4_wsise_ctr(void); +const EVP_CIPHER *EVP_wbsm4_wsise_gcm(void); +const EVP_CIPHER *EVP_wbsm4_wsise_ccm(void); +# endif + # ifndef OPENSSL_NO_ZUC const EVP_CIPHER *EVP_eea3(void); # endif diff --git a/include/openssl/obj_mac.h b/include/openssl/obj_mac.h index 242d7b933..fc09cbda3 100644 --- a/include/openssl/obj_mac.h +++ b/include/openssl/obj_mac.h @@ -4411,4 +4411,88 @@ #define NID_oracle_jdk_trustedkeyusage 1259 #define OBJ_oracle_jdk_trustedkeyusage OBJ_oracle,746875L,1L,1L +#define SN_wbsm4_xiaolai_ecb "WBSM4-XIAOLAI-ECB" +#define LN_wbsm4_xiaolai_ecb "wbsm4-xiaolai-ecb" +#define NID_wbsm4_xiaolai_ecb 1260 + +#define SN_wbsm4_xiaolai_cbc "WBSM4-XIAOLAI-CBC" +#define LN_wbsm4_xiaolai_cbc "wbsm4-xiaolai-cbc" +#define 
NID_wbsm4_xiaolai_cbc 1261 + +#define SN_wbsm4_xiaolai_ofb128 "WBSM4-XIAOLAI-OFB" +#define LN_wbsm4_xiaolai_ofb128 "wbsm4-xiaolai-ofb" +#define NID_wbsm4_xiaolai_ofb128 1262 + +#define SN_wbsm4_xiaolai_cfb128 "WBSM4-XIAOLAI-CFB" +#define LN_wbsm4_xiaolai_cfb128 "wbsm4-xiaolai-cfb" +#define NID_wbsm4_xiaolai_cfb128 1263 + +#define SN_wbsm4_xiaolai_ctr "WBSM4-XIAOLAI-CTR" +#define LN_wbsm4_xiaolai_ctr "wbsm4-xiaolai-ctr" +#define NID_wbsm4_xiaolai_ctr 1264 + +#define SN_wbsm4_xiaolai_gcm "WBSM4-XIAOLAI-GCM" +#define LN_wbsm4_xiaolai_gcm "wbsm4-xiaolai-gcm" +#define NID_wbsm4_xiaolai_gcm 1265 + +#define SN_wbsm4_xiaolai_ccm "WBSM4-XIAOLAI-CCM" +#define LN_wbsm4_xiaolai_ccm "wbsm4-xiaolai-ccm" +#define NID_wbsm4_xiaolai_ccm 1266 + +#define SN_wbsm4_baiwu_ecb "WBSM4-BAIWU-ECB" +#define LN_wbsm4_baiwu_ecb "wbsm4-baiwu-ecb" +#define NID_wbsm4_baiwu_ecb 1267 + +#define SN_wbsm4_baiwu_cbc "WBSM4-BAIWU-CBC" +#define LN_wbsm4_baiwu_cbc "wbsm4-baiwu-cbc" +#define NID_wbsm4_baiwu_cbc 1268 + +#define SN_wbsm4_baiwu_ofb128 "WBSM4-BAIWU-OFB" +#define LN_wbsm4_baiwu_ofb128 "wbsm4-baiwu-ofb" +#define NID_wbsm4_baiwu_ofb128 1277 + +#define SN_wbsm4_baiwu_cfb128 "WBSM4-BAIWU-CFB" +#define LN_wbsm4_baiwu_cfb128 "wbsm4-baiwu-cfb" +#define NID_wbsm4_baiwu_cfb128 1278 + +#define SN_wbsm4_baiwu_ctr "WBSM4-BAIWU-CTR" +#define LN_wbsm4_baiwu_ctr "wbsm4-baiwu-ctr" +#define NID_wbsm4_baiwu_ctr 1269 + +#define SN_wbsm4_baiwu_gcm "WBSM4-BAIWU-GCM" +#define LN_wbsm4_baiwu_gcm "wbsm4-baiwu-gcm" +#define NID_wbsm4_baiwu_gcm 1270 + +#define SN_wbsm4_baiwu_ccm "WBSM4-BAIWU-CCM" +#define LN_wbsm4_baiwu_ccm "wbsm4-baiwu-ccm" +#define NID_wbsm4_baiwu_ccm 1271 + +#define SN_wbsm4_wsise_ecb "WBSM4-WSISE-ECB" +#define LN_wbsm4_wsise_ecb "wbsm4-wsise-ecb" +#define NID_wbsm4_wsise_ecb 1272 + +#define SN_wbsm4_wsise_cbc "WBSM4-WSISE-CBC" +#define LN_wbsm4_wsise_cbc "wbsm4-wsise-cbc" +#define NID_wbsm4_wsise_cbc 1273 + +#define SN_wbsm4_wsise_ofb128 "WBSM4-WSISE-OFB" +#define LN_wbsm4_wsise_ofb128 
"wbsm4-wsise-ofb" +#define NID_wbsm4_wsise_ofb128 1279 + +#define SN_wbsm4_wsise_cfb128 "WBSM4-WSISE-CFB" +#define LN_wbsm4_wsise_cfb128 "wbsm4-wsise-cfb" +#define NID_wbsm4_wsise_cfb128 1280 + +#define SN_wbsm4_wsise_ctr "WBSM4-WSISE-CTR" +#define LN_wbsm4_wsise_ctr "wbsm4-wsise-ctr" +#define NID_wbsm4_wsise_ctr 1274 + +#define SN_wbsm4_wsise_gcm "WBSM4-WSISE-GCM" +#define LN_wbsm4_wsise_gcm "wbsm4-wsise-gcm" +#define NID_wbsm4_wsise_gcm 1275 + +#define SN_wbsm4_wsise_ccm "WBSM4-WSISE-CCM" +#define LN_wbsm4_wsise_ccm "wbsm4-wsise-ccm" +#define NID_wbsm4_wsise_ccm 1276 + #endif /* OPENSSL_OBJ_MAC_H */ diff --git a/providers/defltprov.c b/providers/defltprov.c index 5487a0cad..e162b40d0 100644 --- a/providers/defltprov.c +++ b/providers/defltprov.c @@ -226,6 +226,31 @@ static const OSSL_ALGORITHM_CAPABLE deflt_ciphers[] = { ALG(PROV_NAMES_SM4_GCM, ossl_sm4128gcm_functions), ALG(PROV_NAMES_SM4_CCM, ossl_sm4128ccm_functions), #endif /* OPENSSL_NO_SM4 */ +#ifndef OPENSSL_NO_WBSM4 + ALG(PROV_NAMES_WBSM4_XIAOLAI_ECB, ossl_wbsm4_xiaolai1225984ecb_functions), + ALG(PROV_NAMES_WBSM4_XIAOLAI_CBC, ossl_wbsm4_xiaolai1225984cbc_functions), + ALG(PROV_NAMES_WBSM4_XIAOLAI_CTR, ossl_wbsm4_xiaolai1225984ctr_functions), + ALG(PROV_NAMES_WBSM4_XIAOLAI_OFB, ossl_wbsm4_xiaolai1225984ofb128_functions), + ALG(PROV_NAMES_WBSM4_XIAOLAI_CFB, ossl_wbsm4_xiaolai1225984cfb128_functions), + ALG(PROV_NAMES_WBSM4_XIAOLAI_GCM, ossl_wbsm4_xiaolai1225984gcm_functions), + ALG(PROV_NAMES_WBSM4_XIAOLAI_CCM, ossl_wbsm4_xiaolai1225984ccm_functions), + + ALG(PROV_NAMES_WBSM4_BAIWU_ECB, ossl_wbsm4_baiwu272638208ecb_functions), + ALG(PROV_NAMES_WBSM4_BAIWU_CBC, ossl_wbsm4_baiwu272638208cbc_functions), + ALG(PROV_NAMES_WBSM4_BAIWU_CTR, ossl_wbsm4_baiwu272638208ctr_functions), + ALG(PROV_NAMES_WBSM4_BAIWU_OFB, ossl_wbsm4_baiwu272638208ofb128_functions), + ALG(PROV_NAMES_WBSM4_BAIWU_CFB, ossl_wbsm4_baiwu272638208cfb128_functions), + ALG(PROV_NAMES_WBSM4_BAIWU_GCM, ossl_wbsm4_baiwu272638208gcm_functions), + 
ALG(PROV_NAMES_WBSM4_BAIWU_CCM, ossl_wbsm4_baiwu272638208ccm_functions), + + ALG(PROV_NAMES_WBSM4_WSISE_ECB, ossl_wbsm4_wsise2274560ecb_functions), + ALG(PROV_NAMES_WBSM4_WSISE_CBC, ossl_wbsm4_wsise2274560cbc_functions), + ALG(PROV_NAMES_WBSM4_WSISE_CTR, ossl_wbsm4_wsise2274560ctr_functions), + ALG(PROV_NAMES_WBSM4_WSISE_OFB, ossl_wbsm4_wsise2274560ofb128_functions), + ALG(PROV_NAMES_WBSM4_WSISE_CFB, ossl_wbsm4_wsise2274560cfb128_functions), + ALG(PROV_NAMES_WBSM4_WSISE_GCM, ossl_wbsm4_wsise2274560gcm_functions), + ALG(PROV_NAMES_WBSM4_WSISE_CCM, ossl_wbsm4_wsise2274560ccm_functions), +#endif /* OPENSSL_NO_WBSM4 */ #ifndef OPENSSL_NO_CHACHA ALG(PROV_NAMES_ChaCha20, ossl_chacha20_functions), # ifndef OPENSSL_NO_POLY1305 @@ -275,6 +300,9 @@ static const OSSL_ALGORITHM deflt_kdfs[] = { { PROV_NAMES_SCRYPT, "provider=default", ossl_kdf_scrypt_functions }, #endif { PROV_NAMES_KRB5KDF, "provider=default", ossl_kdf_krb5kdf_functions }, +#ifndef OPENSSL_NO_WBSM4 + { PROV_NAMES_WBSM4KDF, "provider=default", ossl_kdf_wbsm4_functions }, +#endif { NULL, NULL, NULL } }; diff --git a/providers/implementations/ciphers/build.info b/providers/implementations/ciphers/build.info index 1eff440ae..a5cf3f380 100644 --- a/providers/implementations/ciphers/build.info +++ b/providers/implementations/ciphers/build.info @@ -70,6 +70,12 @@ IF[{- !$disabled{sm4} -}] cipher_sm4.c cipher_sm4_hw.c \ cipher_sm4_gcm.c cipher_sm4_gcm_hw.c \ cipher_sm4_ccm.c cipher_sm4_ccm_hw.c + IF[{- !$disabled{wbsm4} -}] + SOURCE[$SM4_GOAL]= ${SOURCE[$SM4_GOAL]}\ + cipher_wbsm4.c cipher_wbsm4_hw.c \ + cipher_wbsm4_gcm.c cipher_wbsm4_gcm_hw.c \ + cipher_wbsm4_ccm.c cipher_wbsm4_ccm_hw.c + ENDIF ENDIF IF[{- !$disabled{ocb} -}] diff --git a/providers/implementations/ciphers/cipher_wbsm4.c b/providers/implementations/ciphers/cipher_wbsm4.c new file mode 100644 index 000000000..e943fa7e8 --- /dev/null +++ b/providers/implementations/ciphers/cipher_wbsm4.c @@ -0,0 +1,141 @@ +/* + * Copyright 2024 The Tongsuo Project 
Authors. All Rights Reserved. + * Copyright 2019-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +/* Dispatch functions for cast cipher modes ecb, cbc, ofb, cfb */ + +#include "cipher_wbsm4.h" +#include "prov/implementations.h" +#include "prov/providercommon.h" + +// xiaolai +static OSSL_FUNC_cipher_freectx_fn wbsm4_xiaolai_freectx; +static OSSL_FUNC_cipher_dupctx_fn wbsm4_xiaolai_dupctx; + +static void wbsm4_xiaolai_freectx(void *vctx) +{ + PROV_WBSM4_XIAOLAI_CTX *ctx = (PROV_WBSM4_XIAOLAI_CTX *)vctx; + + ossl_cipher_generic_reset_ctx((PROV_CIPHER_CTX *)vctx); + OPENSSL_clear_free(ctx, sizeof(*ctx)); +} + +static void *wbsm4_xiaolai_dupctx(void *ctx) +{ + PROV_WBSM4_XIAOLAI_CTX *in = (PROV_WBSM4_XIAOLAI_CTX *)ctx; + PROV_WBSM4_XIAOLAI_CTX *ret; + + if (!ossl_prov_is_running()) + return NULL; + + ret = OPENSSL_malloc(sizeof(*ret)); + if (ret == NULL) + { + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return NULL; + } + in->base.hw->copyctx(&ret->base, &in->base); + + return ret; +} + +/* ossl_wbsm4_xiaolai1225984ecb_functions */ +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ecb, ECB, 0, 1225984, 128, 0, block); +/* ossl_wbsm4_xiaolai1225984cbc_functions */ +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, cbc, CBC, 0, 1225984, 128, 128, block); +/* ossl_wbsm4_xiaolai1225984ctr_functions */ +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ctr, CTR, 0, 1225984, 8, 128, stream); +/* ossl_wbsm4_xiaolai1225984ofb128_functions */ +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ofb128, OFB, 0, 1225984, 8, 128, stream); +/* ossl_wbsm4_xiaolai1225984cfb128_functions */ +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, cfb128, CFB, 0, 1225984, 
8, 128, stream); + +// baiwu +static OSSL_FUNC_cipher_freectx_fn wbsm4_baiwu_freectx; +static OSSL_FUNC_cipher_dupctx_fn wbsm4_baiwu_dupctx; + +static void wbsm4_baiwu_freectx(void *vctx) +{ + PROV_WBSM4_BAIWU_CTX *ctx = (PROV_WBSM4_BAIWU_CTX *)vctx; + + ossl_cipher_generic_reset_ctx((PROV_CIPHER_CTX *)vctx); + OPENSSL_clear_free(ctx, sizeof(*ctx)); +} + +static void *wbsm4_baiwu_dupctx(void *ctx) +{ + PROV_WBSM4_BAIWU_CTX *in = (PROV_WBSM4_BAIWU_CTX *)ctx; + PROV_WBSM4_BAIWU_CTX *ret; + + if (!ossl_prov_is_running()) + return NULL; + + ret = OPENSSL_malloc(sizeof(*ret)); + if (ret == NULL) + { + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return NULL; + } + in->base.hw->copyctx(&ret->base, &in->base); + + return ret; +} + +/* ossl_wbsm4_baiwu272638208ecb_functions */ +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ecb, ECB, 0, 272638208, 128, 0, block); +/* ossl_wbsm4_baiwu272638208cbc_functions */ +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, cbc, CBC, 0, 272638208, 128, 128, block); +/* ossl_wbsm4_baiwu272638208ctr_functions */ +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ctr, CTR, 0, 272638208, 8, 128, stream); +/* ossl_wbsm4_baiwu272638208ofb128_functions */ +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ofb128, OFB, 0, 272638208, 8, 128, stream); +/* ossl_wbsm4_baiwu272638208cfb128_functions */ +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, cfb128, CFB, 0, 272638208, 8, 128, stream); + +// wsise +static OSSL_FUNC_cipher_freectx_fn wbsm4_wsise_freectx; +static OSSL_FUNC_cipher_dupctx_fn wbsm4_wsise_dupctx; + +static void wbsm4_wsise_freectx(void *vctx) +{ + PROV_WBSM4_WSISE_CTX *ctx = (PROV_WBSM4_WSISE_CTX *)vctx; + + ossl_cipher_generic_reset_ctx((PROV_CIPHER_CTX *)vctx); + OPENSSL_clear_free(ctx, sizeof(*ctx)); +} + +static void *wbsm4_wsise_dupctx(void *ctx) +{ + PROV_WBSM4_WSISE_CTX *in = (PROV_WBSM4_WSISE_CTX *)ctx; + PROV_WBSM4_WSISE_CTX *ret; + + if (!ossl_prov_is_running()) + return NULL; + + ret = 
OPENSSL_malloc(sizeof(*ret)); + if (ret == NULL) + { + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return NULL; + } + in->base.hw->copyctx(&ret->base, &in->base); + + return ret; +} + +/* ossl_wbsm4_wsise2274560ecb_functions */ +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ecb, ECB, 0, 2274560, 128, 0, block); +/* ossl_wbsm4_wsise2274560cbc_functions */ +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, cbc, CBC, 0, 2274560, 128, 128, block); +/* ossl_wbsm4_wsise2274560ctr_functions */ +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ctr, CTR, 0, 2274560, 8, 128, stream); +/* ossl_wbsm4_wsise2274560ofb128_functions */ +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ofb128, OFB, 0, 2274560, 8, 128, stream); +/* ossl_wbsm4_wsise2274560cfb128_functions */ +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, cfb128, CFB, 0, 2274560, 8, 128, stream); diff --git a/providers/implementations/ciphers/cipher_wbsm4.h b/providers/implementations/ciphers/cipher_wbsm4.h new file mode 100644 index 000000000..c602dd5cc --- /dev/null +++ b/providers/implementations/ciphers/cipher_wbsm4.h @@ -0,0 +1,63 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2019-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include "prov/ciphercommon.h" +#include "crypto/wbsm4.h" + +// xiaolai +typedef struct +{ + PROV_CIPHER_CTX base; /* Must be first */ + union + { + OSSL_UNION_ALIGN; + wbsm4_xiaolai_key ks; + } ks; +} PROV_WBSM4_XIAOLAI_CTX; + +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_cbc(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_ecb(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_ctr(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_ofb128(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_cfb128(size_t keybits); + +// baiwu +typedef struct +{ + PROV_CIPHER_CTX base; /* Must be first */ + union + { + OSSL_UNION_ALIGN; + wbsm4_baiwu_key ks; + } ks; +} PROV_WBSM4_BAIWU_CTX; + +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_cbc(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_ecb(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_ctr(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_ofb128(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_cfb128(size_t keybits); + +// wsise +typedef struct +{ + PROV_CIPHER_CTX base; /* Must be first */ + union + { + OSSL_UNION_ALIGN; + wbsm4_wsise_key ks; + } ks; +} PROV_WBSM4_WSISE_CTX; + +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_cbc(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_ecb(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_ctr(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_ofb128(size_t keybits); +const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_cfb128(size_t keybits); diff --git a/providers/implementations/ciphers/cipher_wbsm4_ccm.c 
b/providers/implementations/ciphers/cipher_wbsm4_ccm.c
new file mode 100644
index 000000000..2d207a5c9
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_ccm.c
@@ -0,0 +1,90 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

#include "internal/deprecated.h"

#include "cipher_wbsm4_ccm.h"
#include "prov/implementations.h"
#include "prov/providercommon.h"

/*
 * CCM provider glue for the three white-box SM4 variants.  Each variant
 * gets a newctx/freectx pair; the rest of the AEAD dispatch table is
 * generated by IMPLEMENT_aead_cipher.  The "keybits" constants are the
 * bit sizes of the exported white-box key blobs, not a classical key size.
 */

// xiaolai
/* Allocate a CCM context for WBSM4-XIAOLAI and bind the variant HW vtable. */
static void *wbsm4_xiaolai_ccm_newctx(void *provctx, size_t keybits)
{
    PROV_WBSM4_XIAOLAI_CCM_CTX *ctx;

    if (!ossl_prov_is_running())
        return NULL;

    ctx = OPENSSL_zalloc(sizeof(*ctx));
    if (ctx != NULL)
        ossl_ccm_initctx(&ctx->base, keybits, ossl_prov_wbsm4_xiaolai_hw_ccm(keybits));
    return ctx;
}

static OSSL_FUNC_cipher_freectx_fn wbsm4_xiaolai_ccm_freectx;
/* clear_free: the context embeds the key tables, so scrub before freeing. */
static void wbsm4_xiaolai_ccm_freectx(void *vctx)
{
    PROV_WBSM4_XIAOLAI_CCM_CTX *ctx = (PROV_WBSM4_XIAOLAI_CCM_CTX *)vctx;

    OPENSSL_clear_free(ctx, sizeof(*ctx));
}

/* ossl_wbsm4_xiaolai1225984ccm_functions */
IMPLEMENT_aead_cipher(wbsm4_xiaolai, ccm, CCM, AEAD_FLAGS, 1225984, 8, 96);

// baiwu
/* Allocate a CCM context for WBSM4-BAIWU and bind the variant HW vtable. */
static void *wbsm4_baiwu_ccm_newctx(void *provctx, size_t keybits)
{
    PROV_WBSM4_BAIWU_CCM_CTX *ctx;

    if (!ossl_prov_is_running())
        return NULL;

    ctx = OPENSSL_zalloc(sizeof(*ctx));
    if (ctx != NULL)
        ossl_ccm_initctx(&ctx->base, keybits, ossl_prov_wbsm4_baiwu_hw_ccm(keybits));
    return ctx;
}

static OSSL_FUNC_cipher_freectx_fn wbsm4_baiwu_ccm_freectx;
/* clear_free: the context embeds the key tables, so scrub before freeing. */
static void wbsm4_baiwu_ccm_freectx(void *vctx)
{
    PROV_WBSM4_BAIWU_CCM_CTX *ctx = (PROV_WBSM4_BAIWU_CCM_CTX *)vctx;

    OPENSSL_clear_free(ctx, sizeof(*ctx));
}

/* ossl_wbsm4_baiwu272638208ccm_functions */
IMPLEMENT_aead_cipher(wbsm4_baiwu, ccm, CCM, AEAD_FLAGS, 272638208, 8, 96);

// wsise
/* Allocate a CCM context for WBSM4-WSISE and bind the variant HW vtable. */
static void *wbsm4_wsise_ccm_newctx(void *provctx, size_t keybits)
{
    PROV_WBSM4_WSISE_CCM_CTX *ctx;

    if (!ossl_prov_is_running())
        return NULL;

    ctx = OPENSSL_zalloc(sizeof(*ctx));
    if (ctx != NULL)
        ossl_ccm_initctx(&ctx->base, keybits, ossl_prov_wbsm4_wsise_hw_ccm(keybits));
    return ctx;
}

static OSSL_FUNC_cipher_freectx_fn wbsm4_wsise_ccm_freectx;
/* clear_free: the context embeds the key tables, so scrub before freeing. */
static void wbsm4_wsise_ccm_freectx(void *vctx)
{
    PROV_WBSM4_WSISE_CCM_CTX *ctx = (PROV_WBSM4_WSISE_CCM_CTX *)vctx;

    OPENSSL_clear_free(ctx, sizeof(*ctx));
}

/* ossl_wbsm4_wsise2274560ccm_functions */
IMPLEMENT_aead_cipher(wbsm4_wsise, ccm, CCM, AEAD_FLAGS, 2274560, 8, 96);
diff --git a/providers/implementations/ciphers/cipher_wbsm4_ccm.h b/providers/implementations/ciphers/cipher_wbsm4_ccm.h
new file mode 100644
index 000000000..57e63f1e7
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_ccm.h
@@ -0,0 +1,52 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

/* NOTE(review): the next include's target was lost in extraction (the
 * angle-bracket header name was stripped) — restore from upstream. */
#include
#include "prov/ciphercommon.h"
#include "prov/ciphercommon_ccm.h"

/*
 * CCM contexts for the three white-box SM4 variants.  PROV_CCM_CTX must
 * be the first member so the common CCM code can cast the pointer.
 */

// xiaolai
typedef struct prov_wbsm4_xiaolai_ccm_ctx_st
{
    PROV_CCM_CTX base; /* must be first entry in struct */
    union
    {
        OSSL_UNION_ALIGN;
        wbsm4_xiaolai_key ks;
    } ks; /* WBSM4 key schedule to use */
} PROV_WBSM4_XIAOLAI_CCM_CTX;

/* Returns the CCM hardware vtable for the Xiao-Lai variant. */
const PROV_CCM_HW *ossl_prov_wbsm4_xiaolai_hw_ccm(size_t keybits);

// baiwu
typedef struct prov_wbsm4_baiwu_ccm_ctx_st
{
    PROV_CCM_CTX base; /* must be first entry in struct */
    union
    {
        OSSL_UNION_ALIGN;
        wbsm4_baiwu_key ks;
    } ks; /* WBSM4 key schedule to use */
} PROV_WBSM4_BAIWU_CCM_CTX;

/* Returns the CCM hardware vtable for the Bai-Wu variant. */
const PROV_CCM_HW *ossl_prov_wbsm4_baiwu_hw_ccm(size_t keybits);

// wsise
typedef struct prov_wbsm4_wsise_ccm_ctx_st
{
    PROV_CCM_CTX base; /* must be first entry in struct */
    union
    {
        OSSL_UNION_ALIGN;
        wbsm4_wsise_key ks;
    } ks; /* WBSM4 key schedule to use */
} PROV_WBSM4_WSISE_CCM_CTX;

/* Returns the CCM hardware vtable for the WSISE variant. */
const PROV_CCM_HW *ossl_prov_wbsm4_wsise_hw_ccm(size_t keybits);
diff --git a/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c b/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c
new file mode 100644
index 000000000..4c90b201b
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c
@@ -0,0 +1,103 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

#include "internal/deprecated.h"

#include "cipher_wbsm4_ccm.h"

/*
 * CCM "hardware" backends for the white-box SM4 variants.  Each initkey
 * installs the variant's key tables and wires the variant's encrypt
 * primitive into the generic CCM128 machinery; everything else reuses
 * the generic CCM helpers.
 */

// xiaolai
/*
 * NOTE(review): keylen is accepted but not validated here — presumably
 * the caller guarantees the blob is sizeof(wbsm4_xiaolai_key); confirm.
 */
static int wbsm4_xiaolai_ccm_initkey(PROV_CCM_CTX *ctx, const unsigned char *key,
                                     size_t keylen)
{
    PROV_WBSM4_XIAOLAI_CCM_CTX *actx = (PROV_WBSM4_XIAOLAI_CCM_CTX *)ctx;
    wbsm4_xiaolai_key *ks = &actx->ks.ks;

    wbsm4_xiaolai_set_key(key, ks);
    CRYPTO_ccm128_init(&ctx->ccm_ctx, ctx->m, ctx->l, &actx->ks.ks,
                       (block128_f)wbsm4_xiaolai_encrypt);
    ctx->str = (ccm128_f)NULL; /* no accelerated bulk routine */

    ctx->key_set = 1;

    return 1;
}

static const PROV_CCM_HW wbsm4_xiaolai_ccm = {
    wbsm4_xiaolai_ccm_initkey,
    ossl_ccm_generic_setiv,
    ossl_ccm_generic_setaad,
    ossl_ccm_generic_auth_encrypt,
    ossl_ccm_generic_auth_decrypt,
    ossl_ccm_generic_gettag};

const PROV_CCM_HW *ossl_prov_wbsm4_xiaolai_hw_ccm(size_t keybits)
{
    return &wbsm4_xiaolai_ccm;
}

// baiwu
/* Same pattern as the Xiao-Lai variant; keylen likewise unvalidated. */
static int wbsm4_baiwu_ccm_initkey(PROV_CCM_CTX *ctx, const unsigned char *key,
                                   size_t keylen)
{
    PROV_WBSM4_BAIWU_CCM_CTX *actx = (PROV_WBSM4_BAIWU_CCM_CTX *)ctx;
    wbsm4_baiwu_key *ks = &actx->ks.ks;

    wbsm4_baiwu_set_key(key, ks);
    CRYPTO_ccm128_init(&ctx->ccm_ctx, ctx->m, ctx->l, &actx->ks.ks,
                       (block128_f)wbsm4_baiwu_encrypt);
    ctx->str = (ccm128_f)NULL; /* no accelerated bulk routine */

    ctx->key_set = 1;

    return 1;
}

static const PROV_CCM_HW wbsm4_baiwu_ccm = {
    wbsm4_baiwu_ccm_initkey,
    ossl_ccm_generic_setiv,
    ossl_ccm_generic_setaad,
    ossl_ccm_generic_auth_encrypt,
    ossl_ccm_generic_auth_decrypt,
    ossl_ccm_generic_gettag};

const PROV_CCM_HW *ossl_prov_wbsm4_baiwu_hw_ccm(size_t keybits)
{
    return &wbsm4_baiwu_ccm;
}

// wsise
/* Same pattern as the Xiao-Lai variant; keylen likewise unvalidated. */
static int wbsm4_wsise_ccm_initkey(PROV_CCM_CTX *ctx, const unsigned char *key,
                                   size_t keylen)
{
    PROV_WBSM4_WSISE_CCM_CTX *actx = (PROV_WBSM4_WSISE_CCM_CTX *)ctx;
    wbsm4_wsise_key *ks = &actx->ks.ks;

    wbsm4_wsise_set_key(key, ks);
    CRYPTO_ccm128_init(&ctx->ccm_ctx, ctx->m, ctx->l, &actx->ks.ks,
                       (block128_f)wbsm4_wsise_encrypt);
    ctx->str = (ccm128_f)NULL; /* no accelerated bulk routine */

    ctx->key_set = 1;

    return 1;
}

static const PROV_CCM_HW wbsm4_wsise_ccm = {
    wbsm4_wsise_ccm_initkey,
    ossl_ccm_generic_setiv,
    ossl_ccm_generic_setaad,
    ossl_ccm_generic_auth_encrypt,
    ossl_ccm_generic_auth_decrypt,
    ossl_ccm_generic_gettag};

const PROV_CCM_HW *ossl_prov_wbsm4_wsise_hw_ccm(size_t keybits)
{
    return &wbsm4_wsise_ccm;
}
diff --git a/providers/implementations/ciphers/cipher_wbsm4_gcm.c b/providers/implementations/ciphers/cipher_wbsm4_gcm.c
new file mode 100644
index 000000000..bfb05e3ef
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_gcm.c
@@ -0,0 +1,93 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

#include "internal/deprecated.h"

#include "cipher_wbsm4_gcm.h"
#include "prov/implementations.h"
#include "prov/providercommon.h"

/*
 * GCM provider glue for the three white-box SM4 variants: one
 * newctx/freectx pair per variant, with the rest of the AEAD dispatch
 * generated by IMPLEMENT_aead_cipher.
 */

// xiaolai
/* Allocate a GCM context for WBSM4-XIAOLAI and bind the variant HW vtable. */
static void *wbsm4_xiaolai_gcm_newctx(void *provctx, size_t keybits)
{
    PROV_WBSM4_XIAOLAI_GCM_CTX *ctx;

    if (!ossl_prov_is_running())
        return NULL;

    ctx = OPENSSL_zalloc(sizeof(*ctx));
    if (ctx != NULL)
        ossl_gcm_initctx(provctx, &ctx->base, keybits,
                         ossl_prov_wbsm4_xiaolai_hw_gcm(keybits));
    return ctx;
}

static OSSL_FUNC_cipher_freectx_fn wbsm4_xiaolai_gcm_freectx;
/* clear_free: the context embeds the key tables, so scrub before freeing. */
static void wbsm4_xiaolai_gcm_freectx(void *vctx)
{
    PROV_WBSM4_XIAOLAI_GCM_CTX *ctx = (PROV_WBSM4_XIAOLAI_GCM_CTX *)vctx;

    OPENSSL_clear_free(ctx, sizeof(*ctx));
}

/* ossl_wbsm4_xiaolai1225984gcm_functions */
IMPLEMENT_aead_cipher(wbsm4_xiaolai, gcm, GCM, AEAD_FLAGS, 1225984, 8, 96);

// baiwu
/* Allocate a GCM context for WBSM4-BAIWU and bind the variant HW vtable. */
static void *wbsm4_baiwu_gcm_newctx(void *provctx, size_t keybits)
{
    PROV_WBSM4_BAIWU_GCM_CTX *ctx;

    if (!ossl_prov_is_running())
        return NULL;

    ctx = OPENSSL_zalloc(sizeof(*ctx));
    if (ctx != NULL)
        ossl_gcm_initctx(provctx, &ctx->base, keybits,
                         ossl_prov_wbsm4_baiwu_hw_gcm(keybits));
    return ctx;
}

static OSSL_FUNC_cipher_freectx_fn wbsm4_baiwu_gcm_freectx;
/* clear_free: the context embeds the key tables, so scrub before freeing. */
static void wbsm4_baiwu_gcm_freectx(void *vctx)
{
    PROV_WBSM4_BAIWU_GCM_CTX *ctx = (PROV_WBSM4_BAIWU_GCM_CTX *)vctx;

    OPENSSL_clear_free(ctx, sizeof(*ctx));
}

/* ossl_wbsm4_baiwu272638208gcm_functions */
IMPLEMENT_aead_cipher(wbsm4_baiwu, gcm, GCM, AEAD_FLAGS, 272638208, 8, 96);

// wsise
/* Allocate a GCM context for WBSM4-WSISE and bind the variant HW vtable. */
static void *wbsm4_wsise_gcm_newctx(void *provctx, size_t keybits)
{
    PROV_WBSM4_WSISE_GCM_CTX *ctx;

    if (!ossl_prov_is_running())
        return NULL;

    ctx = OPENSSL_zalloc(sizeof(*ctx));
    if (ctx != NULL)
        ossl_gcm_initctx(provctx, &ctx->base, keybits,
                         ossl_prov_wbsm4_wsise_hw_gcm(keybits));
    return ctx;
}

static OSSL_FUNC_cipher_freectx_fn wbsm4_wsise_gcm_freectx;
/* clear_free: the context embeds the key tables, so scrub before freeing. */
static void wbsm4_wsise_gcm_freectx(void *vctx)
{
    PROV_WBSM4_WSISE_GCM_CTX *ctx = (PROV_WBSM4_WSISE_GCM_CTX *)vctx;

    OPENSSL_clear_free(ctx, sizeof(*ctx));
}

/* ossl_wbsm4_wsise2274560gcm_functions */
IMPLEMENT_aead_cipher(wbsm4_wsise, gcm, GCM, AEAD_FLAGS, 2274560, 8, 96);
diff --git a/providers/implementations/ciphers/cipher_wbsm4_gcm.h b/providers/implementations/ciphers/cipher_wbsm4_gcm.h
new file mode 100644
index 000000000..8c2ed95b8
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_gcm.h
@@ -0,0 +1,52 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

/* NOTE(review): the next include's target was lost in extraction (the
 * angle-bracket header name was stripped) — restore from upstream. */
#include
#include "prov/ciphercommon.h"
#include "prov/ciphercommon_gcm.h"

/*
 * GCM contexts for the three white-box SM4 variants.  PROV_GCM_CTX must
 * be the first member so the common GCM code can cast the pointer.
 */

// xiaolai
typedef struct prov_wbsm4_xiaolai_gcm_ctx_st
{
    PROV_GCM_CTX base; /* must be first entry in struct */
    union
    {
        OSSL_UNION_ALIGN;
        wbsm4_xiaolai_key ks;
    } ks; /* WBSM4 key schedule to use */
} PROV_WBSM4_XIAOLAI_GCM_CTX;

/* Returns the GCM hardware vtable for the Xiao-Lai variant. */
const PROV_GCM_HW *ossl_prov_wbsm4_xiaolai_hw_gcm(size_t keybits);

// baiwu
typedef struct prov_wbsm4_baiwu_gcm_ctx_st
{
    PROV_GCM_CTX base; /* must be first entry in struct */
    union
    {
        OSSL_UNION_ALIGN;
        wbsm4_baiwu_key ks;
    } ks; /* WBSM4 key schedule to use */
} PROV_WBSM4_BAIWU_GCM_CTX;

/* Returns the GCM hardware vtable for the Bai-Wu variant. */
const PROV_GCM_HW *ossl_prov_wbsm4_baiwu_hw_gcm(size_t keybits);

// wsise
typedef struct prov_wbsm4_wsise_gcm_ctx_st
{
    PROV_GCM_CTX base; /* must be first entry in struct */
    union
    {
        OSSL_UNION_ALIGN;
        wbsm4_wsise_key ks;
    } ks; /* WBSM4 key schedule to use */
} PROV_WBSM4_WSISE_GCM_CTX;
/* Returns the GCM hardware vtable for the WSISE variant. */
const PROV_GCM_HW *ossl_prov_wbsm4_wsise_hw_gcm(size_t keybits);
diff --git a/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c b/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c
new file mode 100644
index 000000000..4bbce5c19
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c
@@ -0,0 +1,205 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

#include "internal/deprecated.h"

#include "cipher_wbsm4_gcm.h"

/*
 * GCM "hardware" backends for the white-box SM4 variants.  Each initkey
 * installs the variant's key tables and registers the variant's block
 * encrypt with the generic GCM128 machinery.  ctx->ctr is always NULL
 * (no accelerated ctr32 routine), so the cipher_update helpers take the
 * generic CRYPTO_gcm128_encrypt/decrypt path.
 */

// xiaolai
static int wbsm4_xiaolai_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key,
                                     size_t keylen)
{
    PROV_WBSM4_XIAOLAI_GCM_CTX *actx = (PROV_WBSM4_XIAOLAI_GCM_CTX *)ctx;
    wbsm4_xiaolai_key *ks = &actx->ks.ks;

    ctx->ks = ks; /* generic GCM code keeps a pointer to the schedule */

    wbsm4_xiaolai_set_key(key, ks);
    CRYPTO_gcm128_init(&ctx->gcm, ks, (block128_f)wbsm4_xiaolai_encrypt);
    ctx->ctr = (ctr128_f)NULL; /* no ctr32 fast path */

    ctx->key_set = 1;

    return 1;
}

/* CRYPTO_gcm128_* return 0 on success; map any failure to 0. */
static int generic_wbsm4_xiaolai_gcm_cipher_update(PROV_GCM_CTX *ctx,
                                                   const unsigned char *in,
                                                   size_t len, unsigned char *out)
{
    if (ctx->enc)
    {
        if (ctx->ctr != NULL)
        {
            if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
                return 0;
        }
        else
        {
            if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len))
                return 0;
        }
    }
    else
    {
        if (ctx->ctr != NULL)
        {
            if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
                return 0;
        }
        else
        {
            if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len))
                return 0;
        }
    }
    return 1;
}

static const PROV_GCM_HW wbsm4_xiaolai_gcm = {
    wbsm4_xiaolai_gcm_initkey,
    ossl_gcm_setiv,
    ossl_gcm_aad_update,
    generic_wbsm4_xiaolai_gcm_cipher_update,
    ossl_gcm_cipher_final,
    ossl_gcm_one_shot};

const PROV_GCM_HW *ossl_prov_wbsm4_xiaolai_hw_gcm(size_t keybits)
{
    return &wbsm4_xiaolai_gcm;
}

// baiwu
static int wbsm4_baiwu_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key,
                                   size_t keylen)
{
    PROV_WBSM4_BAIWU_GCM_CTX *actx = (PROV_WBSM4_BAIWU_GCM_CTX *)ctx;
    wbsm4_baiwu_key *ks = &actx->ks.ks;

    ctx->ks = ks; /* generic GCM code keeps a pointer to the schedule */

    wbsm4_baiwu_set_key(key, ks);
    CRYPTO_gcm128_init(&ctx->gcm, ks, (block128_f)wbsm4_baiwu_encrypt);
    ctx->ctr = (ctr128_f)NULL; /* no ctr32 fast path */

    ctx->key_set = 1;

    return 1;
}

/* CRYPTO_gcm128_* return 0 on success; map any failure to 0. */
static int generic_wbsm4_baiwu_gcm_cipher_update(PROV_GCM_CTX *ctx,
                                                 const unsigned char *in,
                                                 size_t len, unsigned char *out)
{
    if (ctx->enc)
    {
        if (ctx->ctr != NULL)
        {
            if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
                return 0;
        }
        else
        {
            if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len))
                return 0;
        }
    }
    else
    {
        if (ctx->ctr != NULL)
        {
            if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
                return 0;
        }
        else
        {
            if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len))
                return 0;
        }
    }
    return 1;
}

static const PROV_GCM_HW wbsm4_baiwu_gcm = {
    wbsm4_baiwu_gcm_initkey,
    ossl_gcm_setiv,
    ossl_gcm_aad_update,
    generic_wbsm4_baiwu_gcm_cipher_update,
    ossl_gcm_cipher_final,
    ossl_gcm_one_shot};

const PROV_GCM_HW *ossl_prov_wbsm4_baiwu_hw_gcm(size_t keybits)
{
    return &wbsm4_baiwu_gcm;
}

// wsise
static int wbsm4_wsise_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key,
                                   size_t keylen)
{
    PROV_WBSM4_WSISE_GCM_CTX *actx = (PROV_WBSM4_WSISE_GCM_CTX *)ctx;
    wbsm4_wsise_key *ks = &actx->ks.ks;

    ctx->ks = ks; /* generic GCM code keeps a pointer to the schedule */

    wbsm4_wsise_set_key(key, ks);
    CRYPTO_gcm128_init(&ctx->gcm, ks, (block128_f)wbsm4_wsise_encrypt);
    ctx->ctr = (ctr128_f)NULL; /* no ctr32 fast path */

    ctx->key_set = 1;

    return 1;
}

/* CRYPTO_gcm128_* return 0 on success; map any failure to 0. */
static int generic_wbsm4_wsise_gcm_cipher_update(PROV_GCM_CTX *ctx,
                                                 const unsigned char *in,
                                                 size_t len, unsigned char *out)
{
    if (ctx->enc)
    {
        if (ctx->ctr != NULL)
        {
            if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
                return 0;
        }
        else
        {
            if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len))
                return 0;
        }
    }
    else
    {
        if (ctx->ctr != NULL)
        {
            if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
                return 0;
        }
        else
        {
            if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len))
                return 0;
        }
    }
    return 1;
}

static const PROV_GCM_HW wbsm4_wsise_gcm = {
    wbsm4_wsise_gcm_initkey,
    ossl_gcm_setiv,
    ossl_gcm_aad_update,
    generic_wbsm4_wsise_gcm_cipher_update,
    ossl_gcm_cipher_final,
    ossl_gcm_one_shot};

const PROV_GCM_HW *ossl_prov_wbsm4_wsise_hw_gcm(size_t keybits)
{
    return &wbsm4_wsise_gcm;
}
diff --git a/providers/implementations/ciphers/cipher_wbsm4_hw.c b/providers/implementations/ciphers/cipher_wbsm4_hw.c
new file mode 100644
index 000000000..80baa0be5
--- /dev/null
+++ b/providers/implementations/ciphers/cipher_wbsm4_hw.c
@@ -0,0 +1,134 @@
/*
 * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved.
 * Copyright 2019-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License.
You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt
 */

#include "cipher_wbsm4.h"

/*
 * Generic-mode "hardware" backends for the white-box SM4 variants.  The
 * white-box tables only implement the encrypt direction, so ECB/CBC
 * decryption is rejected (initkey returns 0); the streaming modes
 * (CTR/OFB/CFB) only ever use the encrypt primitive and work both ways.
 */

// xiaolai
static int cipher_hw_wbsm4_xiaolai_initkey(PROV_CIPHER_CTX *ctx,
                                           const unsigned char *key, size_t keylen)
{
    PROV_WBSM4_XIAOLAI_CTX *sctx = (PROV_WBSM4_XIAOLAI_CTX *)ctx;
    wbsm4_xiaolai_key *ks = &sctx->ks.ks;

    ctx->ks = ks;
    if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && ctx->mode != EVP_CIPH_CBC_MODE))
    {
        wbsm4_xiaolai_set_key(key, ks);
        ctx->block = (block128_f)wbsm4_xiaolai_encrypt;
    }
    else
    {
        /* No decrypt primitive exists for this white-box scheme, so
         * ECB/CBC decryption cannot be supported — fail the init. */
        wbsm4_xiaolai_set_key(key, ks);
        // ctx->block = (block128_f)ossl_wbsm4_xiaolai_decrypt;
        return 0;
    }

    return 1;
}

IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_xiaolai_copyctx, PROV_WBSM4_XIAOLAI_CTX)

#define PROV_CIPHER_HW_wbsm4_xiaolai_mode(mode)                                    \
    static const PROV_CIPHER_HW wbsm4_xiaolai_##mode = {                           \
        cipher_hw_wbsm4_xiaolai_initkey,                                           \
        ossl_cipher_hw_generic_##mode,                                             \
        cipher_hw_wbsm4_xiaolai_copyctx};                                          \
    const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_##mode(size_t keybits) \
    {                                                                              \
        return &wbsm4_xiaolai_##mode;                                              \
    }

PROV_CIPHER_HW_wbsm4_xiaolai_mode(cbc);
PROV_CIPHER_HW_wbsm4_xiaolai_mode(ecb);
PROV_CIPHER_HW_wbsm4_xiaolai_mode(ofb128);
PROV_CIPHER_HW_wbsm4_xiaolai_mode(cfb128);
PROV_CIPHER_HW_wbsm4_xiaolai_mode(ctr);

// baiwu
static int cipher_hw_wbsm4_baiwu_initkey(PROV_CIPHER_CTX *ctx,
                                         const unsigned char *key, size_t keylen)
{
    PROV_WBSM4_BAIWU_CTX *sctx = (PROV_WBSM4_BAIWU_CTX *)ctx;
    wbsm4_baiwu_key *ks = &sctx->ks.ks;

    ctx->ks = ks;
    if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && ctx->mode != EVP_CIPH_CBC_MODE))
    {
        wbsm4_baiwu_set_key(key, ks);
        ctx->block = (block128_f)wbsm4_baiwu_encrypt;
    }
    else
    {
        /* Encrypt-only scheme: ECB/CBC decryption unsupported. */
        wbsm4_baiwu_set_key(key, ks);
        // ctx->block = (block128_f)ossl_wbsm4_baiwu_decrypt;
        return 0;
    }

    return 1;
}

IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_baiwu_copyctx, PROV_WBSM4_BAIWU_CTX)

#define PROV_CIPHER_HW_wbsm4_baiwu_mode(mode)                                    \
    static const PROV_CIPHER_HW wbsm4_baiwu_##mode = {                           \
        cipher_hw_wbsm4_baiwu_initkey,                                           \
        ossl_cipher_hw_generic_##mode,                                           \
        cipher_hw_wbsm4_baiwu_copyctx};                                          \
    const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_##mode(size_t keybits) \
    {                                                                            \
        return &wbsm4_baiwu_##mode;                                              \
    }

PROV_CIPHER_HW_wbsm4_baiwu_mode(cbc);
PROV_CIPHER_HW_wbsm4_baiwu_mode(ecb);
PROV_CIPHER_HW_wbsm4_baiwu_mode(ofb128);
PROV_CIPHER_HW_wbsm4_baiwu_mode(cfb128);
PROV_CIPHER_HW_wbsm4_baiwu_mode(ctr);

// wsise
static int cipher_hw_wbsm4_wsise_initkey(PROV_CIPHER_CTX *ctx,
                                         const unsigned char *key, size_t keylen)
{
    PROV_WBSM4_WSISE_CTX *sctx = (PROV_WBSM4_WSISE_CTX *)ctx;
    wbsm4_wsise_key *ks = &sctx->ks.ks;

    ctx->ks = ks;
    if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && ctx->mode != EVP_CIPH_CBC_MODE))
    {
        wbsm4_wsise_set_key(key, ks);
        ctx->block = (block128_f)wbsm4_wsise_encrypt;
    }
    else
    {
        /* Encrypt-only scheme: ECB/CBC decryption unsupported. */
        wbsm4_wsise_set_key(key, ks);
        // ctx->block = (block128_f)ossl_wbsm4_wsise_decrypt;
        return 0;
    }

    return 1;
}

IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_wsise_copyctx, PROV_WBSM4_WSISE_CTX)

#define PROV_CIPHER_HW_wbsm4_wsise_mode(mode)                                    \
    static const PROV_CIPHER_HW wbsm4_wsise_##mode = {                           \
        cipher_hw_wbsm4_wsise_initkey,                                           \
        ossl_cipher_hw_generic_##mode,                                           \
        cipher_hw_wbsm4_wsise_copyctx};                                          \
    const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_##mode(size_t keybits) \
    {                                                                            \
        return &wbsm4_wsise_##mode;                                              \
    }

PROV_CIPHER_HW_wbsm4_wsise_mode(cbc);
PROV_CIPHER_HW_wbsm4_wsise_mode(ecb);
PROV_CIPHER_HW_wbsm4_wsise_mode(ofb128);
PROV_CIPHER_HW_wbsm4_wsise_mode(cfb128);
PROV_CIPHER_HW_wbsm4_wsise_mode(ctr);
diff --git a/providers/implementations/include/prov/implementations.h b/providers/implementations/include/prov/implementations.h
index 85e52074e..359a06ee3 100644
--- a/providers/implementations/include/prov/implementations.h
+++
b/providers/implementations/include/prov/implementations.h @@ -97,6 +97,31 @@ extern const OSSL_DISPATCH ossl_sm4128cfb128_functions[]; extern const OSSL_DISPATCH ossl_sm4128gcm_functions[]; extern const OSSL_DISPATCH ossl_sm4128ccm_functions[]; #endif /* OPENSSL_NO_SM4 */ +#ifndef OPENSSL_NO_WBSM4 +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984ecb_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984cbc_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984ctr_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984ofb128_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984cfb128_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984gcm_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_xiaolai1225984ccm_functions[]; + +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208ecb_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208cbc_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208ctr_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208ofb128_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208cfb128_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208gcm_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_baiwu272638208ccm_functions[]; + +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560ecb_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560cbc_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560ctr_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560ofb128_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560cfb128_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560gcm_functions[]; +extern const OSSL_DISPATCH ossl_wbsm4_wsise2274560ccm_functions[]; +#endif /* OPENSSL_NO_WBSM4 */ #ifndef OPENSSL_NO_RC5 extern const OSSL_DISPATCH ossl_rc5128ecb_functions[]; extern const OSSL_DISPATCH ossl_rc5128cbc_functions[]; @@ -181,6 +206,9 @@ extern const OSSL_DISPATCH 
ossl_kdf_x963_kdf_functions[]; extern const OSSL_DISPATCH ossl_kdf_kbkdf_functions[]; extern const OSSL_DISPATCH ossl_kdf_x942_kdf_functions[]; extern const OSSL_DISPATCH ossl_kdf_krb5kdf_functions[]; +#ifndef OPENSSL_NO_WBSM4 +extern const OSSL_DISPATCH ossl_kdf_wbsm4_functions[]; +#endif /* RNGs */ extern const OSSL_DISPATCH ossl_test_rng_functions[]; diff --git a/providers/implementations/include/prov/names.h b/providers/implementations/include/prov/names.h index 2b672c29a..8e5d299f4 100644 --- a/providers/implementations/include/prov/names.h +++ b/providers/implementations/include/prov/names.h @@ -113,6 +113,27 @@ #define PROV_NAMES_SM4_CFB "SM4-CFB:SM4-CFB128:1.2.156.10197.1.104.4" #define PROV_NAMES_SM4_GCM "SM4-GCM:1.2.156.10197.1.104.8" #define PROV_NAMES_SM4_CCM "SM4-CCM:1.2.156.10197.1.104.9" +#define PROV_NAMES_WBSM4_XIAOLAI_ECB "WBSM4-XIAOLAI-ECB" +#define PROV_NAMES_WBSM4_XIAOLAI_CBC "WBSM4-XIAOLAI-CBC:WBSM4-XIAOLAI" +#define PROV_NAMES_WBSM4_XIAOLAI_CTR "WBSM4-XIAOLAI-CTR" +#define PROV_NAMES_WBSM4_XIAOLAI_OFB "WBSM4-XIAOLAI-OFB:WBSM4-XIAOLAI-OFB128" +#define PROV_NAMES_WBSM4_XIAOLAI_CFB "WBSM4-XIAOLAI-CFB:WBSM4-XIAOLAI-CFB128" +#define PROV_NAMES_WBSM4_XIAOLAI_GCM "WBSM4-XIAOLAI-GCM" +#define PROV_NAMES_WBSM4_XIAOLAI_CCM "WBSM4-XIAOLAI-CCM" +#define PROV_NAMES_WBSM4_BAIWU_ECB "WBSM4-BAIWU-ECB" +#define PROV_NAMES_WBSM4_BAIWU_CBC "WBSM4-BAIWU-CBC:WBSM4-BAIWU" +#define PROV_NAMES_WBSM4_BAIWU_CTR "WBSM4-BAIWU-CTR" +#define PROV_NAMES_WBSM4_BAIWU_OFB "WBSM4-BAIWU-OFB:WBSM4-BAIWU-OFB128" +#define PROV_NAMES_WBSM4_BAIWU_CFB "WBSM4-BAIWU-CFB:WBSM4-BAIWU-CFB128" +#define PROV_NAMES_WBSM4_BAIWU_GCM "WBSM4-BAIWU-GCM" +#define PROV_NAMES_WBSM4_BAIWU_CCM "WBSM4-BAIWU-CCM" +#define PROV_NAMES_WBSM4_WSISE_ECB "WBSM4-WSISE-ECB" +#define PROV_NAMES_WBSM4_WSISE_CBC "WBSM4-WSISE-CBC:WBSM4-WSISE" +#define PROV_NAMES_WBSM4_WSISE_CTR "WBSM4-WSISE-CTR" +#define PROV_NAMES_WBSM4_WSISE_OFB "WBSM4-WSISE-OFB:WBSM4-WSISE-OFB128" +#define PROV_NAMES_WBSM4_WSISE_CFB 
"WBSM4-WSISE-CFB:WBSM4-WSISE-CFB128" +#define PROV_NAMES_WBSM4_WSISE_GCM "WBSM4-WSISE-GCM" +#define PROV_NAMES_WBSM4_WSISE_CCM "WBSM4-WSISE-CCM" #define PROV_NAMES_ChaCha20 "ChaCha20" #define PROV_NAMES_ChaCha20_Poly1305 "ChaCha20-Poly1305" #define PROV_NAMES_RC4 "RC4:1.2.840.113549.3.4" @@ -182,6 +203,7 @@ #define PROV_NAMES_SCRYPT "SCRYPT:id-scrypt:1.3.6.1.4.1.11591.4.11" #define PROV_DESCS_SCRYPT_SIGN "OpenSSL SCRYPT via EVP_PKEY implementation" #define PROV_NAMES_KRB5KDF "KRB5KDF" +#define PROV_NAMES_WBSM4KDF "WBSM4KDF" /*- * MACs diff --git a/providers/implementations/kdfs/build.info b/providers/implementations/kdfs/build.info index f4620adce..b1ae2ce06 100644 --- a/providers/implementations/kdfs/build.info +++ b/providers/implementations/kdfs/build.info @@ -12,6 +12,7 @@ $SSKDF_GOAL=../../libdefault.a ../../libfips.a $SCRYPT_GOAL=../../libdefault.a $SSHKDF_GOAL=../../libdefault.a ../../libfips.a $X942KDF_GOAL=../../libdefault.a ../../libfips.a +$WBSM4KDF_GOAL=../../libdefault.a SOURCE[$TLS1_PRF_GOAL]=tls1_prf.c @@ -36,3 +37,5 @@ SOURCE[$SCRYPT_GOAL]=scrypt.c SOURCE[$SSHKDF_GOAL]=sshkdf.c SOURCE[$X942KDF_GOAL]=x942kdf.c DEPEND[x942kdf.o]=../../common/include/prov/der_wrap.h + +SOURCE[$WBSM4KDF_GOAL]=wbsm4kdf.c \ No newline at end of file diff --git a/providers/implementations/kdfs/wbsm4kdf.c b/providers/implementations/kdfs/wbsm4kdf.c new file mode 100644 index 000000000..625078490 --- /dev/null +++ b/providers/implementations/kdfs/wbsm4kdf.c @@ -0,0 +1,319 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://github.com/Tongsuo-Project/Tongsuo/blob/master/LICENSE.txt + */ + +#include +#include +#include +#include "internal/cryptlib.h" +#include "prov/providercommon.h" + +#ifndef OPENSSL_NO_WBSM4 +#include "crypto/wbsm4.h" + +static OSSL_FUNC_kdf_newctx_fn kdf_wbsm4_new; +static OSSL_FUNC_kdf_freectx_fn kdf_wbsm4_free; +static OSSL_FUNC_kdf_reset_fn kdf_wbsm4_reset; +static OSSL_FUNC_kdf_derive_fn kdf_wbsm4_derive; +static OSSL_FUNC_kdf_settable_ctx_params_fn kdf_wbsm4_settable_ctx_params; +static OSSL_FUNC_kdf_set_ctx_params_fn kdf_wbsm4_set_ctx_params; +static OSSL_FUNC_kdf_gettable_ctx_params_fn kdf_wbsm4_gettable_ctx_params; +static OSSL_FUNC_kdf_get_ctx_params_fn kdf_wbsm4_get_ctx_params; + +typedef struct +{ + void *provctx; + unsigned char *key; + size_t key_len; + unsigned char *cipher; + size_t cipher_len; +} KDF_WBSM4; + +static void *kdf_wbsm4_new(void *provctx) +{ + KDF_WBSM4 *ctx; + + if (!ossl_prov_is_running()) + return NULL; + + ctx = OPENSSL_zalloc(sizeof(*ctx)); + if (ctx == NULL) + { + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return NULL; + } + ctx->provctx = provctx; + return ctx; +} + +static void kdf_wbsm4_cleanup(KDF_WBSM4 *ctx) +{ + if (ctx->cipher) + OPENSSL_free(ctx->cipher); + if (ctx->key) + OPENSSL_cleanse(ctx->key, ctx->key_len); + memset(ctx, 0, sizeof(*ctx)); +} + +static void kdf_wbsm4_free(void *vctx) +{ + KDF_WBSM4 *ctx = (KDF_WBSM4 *)vctx; + + if (ctx != NULL) + { + kdf_wbsm4_cleanup(ctx); + OPENSSL_free(ctx); + } +} + +static void kdf_wbsm4_reset(void *vctx) +{ + KDF_WBSM4 *ctx = (KDF_WBSM4 *)vctx; + void *provctx = ctx->provctx; + + kdf_wbsm4_cleanup(ctx); + ctx->provctx = provctx; +} + +static int kdf_wbsm4_set_membuf(unsigned char **buffer, size_t *buflen, + const OSSL_PARAM *p) +{ + OPENSSL_clear_free(*buffer, *buflen); + *buffer = NULL; + *buflen = 0; + + if (p->data_size == 0) + { + if ((*buffer = OPENSSL_zalloc(1)) == NULL) + { + 
ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return 0; + } + } + else if (p->data != NULL) + { + if (!OSSL_PARAM_get_utf8_string(p, (char **)buffer, *buflen)) + return 0; + } + return 1; +} + +static int kdf_wbsm4_derive(void *vctx, unsigned char *key, size_t keylen, + const OSSL_PARAM params[]) +{ + KDF_WBSM4 *ctx = (KDF_WBSM4 *)vctx; + + if (!ossl_prov_is_running() || !kdf_wbsm4_set_ctx_params(ctx, params)) + return 0; + + if (ctx->cipher == NULL) + { + ERR_raise(ERR_LIB_PROV, PROV_R_MISSING_CIPHER); + return 0; + } + + if (ctx->key == NULL) + { + ERR_raise(ERR_LIB_PROV, PROV_R_MISSING_KEY); + return 0; + } + if (ctx->key_len != 32) + { + ERR_raise(ERR_LIB_PROV, PROV_R_INVALID_KEY_LENGTH); + return 0; + } + + unsigned char sm4key[16]; + size_t sm4key_len = sizeof(sm4key); + if (!OPENSSL_hexstr2buf_ex(sm4key, sm4key_len, &sm4key_len, (const char *)ctx->key, 0)) + { + ERR_raise(ERR_LIB_PROV, PROV_R_INVALID_KEY); + return 0; + } + + if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-XIAOLAI") == 0) + { + if (keylen != sizeof(wbsm4_xiaolai_key)) + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, PROV_R_BAD_LENGTH); + return 0; + } + + if (key == NULL) + { + OPENSSL_cleanse(sm4key, sm4key_len); + return 1; + } + + wbsm4_xiaolai_key *wbsm4key = OPENSSL_zalloc(sizeof(wbsm4_xiaolai_key)); + if (wbsm4key == NULL) + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return 0; + } + + wbsm4_xiaolai_gen(sm4key, wbsm4key); + wbsm4_xiaolai_export_key(wbsm4key, key); + + OPENSSL_cleanse(sm4key, sm4key_len); + OPENSSL_cleanse(wbsm4key, sizeof(wbsm4_xiaolai_key)); + + return 1; + } + else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-BAIWU") == 0) + { + if (keylen != sizeof(wbsm4_baiwu_key)) + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, PROV_R_BAD_LENGTH); + return 0; + } + + if (key == NULL) + { + OPENSSL_cleanse(sm4key, sm4key_len); + return 1; + } + + wbsm4_baiwu_key *wbsm4key = 
OPENSSL_zalloc(sizeof(wbsm4_baiwu_key)); + if (wbsm4key == NULL) + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return 0; + } + + wbsm4_baiwu_gen(sm4key, wbsm4key); + wbsm4_baiwu_export_key(wbsm4key, key); + + OPENSSL_cleanse(sm4key, sm4key_len); + OPENSSL_cleanse(wbsm4key, sizeof(wbsm4_baiwu_key)); + + return 1; + } + else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-WSISE") == 0) + { + if (keylen != sizeof(wbsm4_wsise_key)) + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, PROV_R_BAD_LENGTH); + return 0; + } + + if (key == NULL) + { + OPENSSL_cleanse(sm4key, sm4key_len); + return 1; + } + + wbsm4_wsise_key *wbsm4key = OPENSSL_zalloc(sizeof(wbsm4_wsise_key)); + if (wbsm4key == NULL) + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return 0; + } + + wbsm4_wsise_gen(sm4key, wbsm4key); + wbsm4_wsise_export_key(wbsm4key, key); + + OPENSSL_cleanse(sm4key, sm4key_len); + OPENSSL_cleanse(wbsm4key, sizeof(wbsm4_wsise_key)); + + return 1; + } + else + { + OPENSSL_cleanse(sm4key, sm4key_len); + ERR_raise(ERR_LIB_PROV, PROV_R_MISSING_CIPHER); + return 0; + } +} + +static int kdf_wbsm4_set_ctx_params(void *vctx, const OSSL_PARAM params[]) +{ + const OSSL_PARAM *p; + KDF_WBSM4 *ctx = vctx; + + if ((p = OSSL_PARAM_locate_const(params, OSSL_KDF_PARAM_KEY)) != NULL) + { + if (!kdf_wbsm4_set_membuf(&ctx->key, &ctx->key_len, p)) + return 0; + ctx->key_len = strlen((char *)ctx->key); + } + + if ((p = OSSL_PARAM_locate_const(params, OSSL_KDF_PARAM_CIPHER)) != NULL) + { + if (!kdf_wbsm4_set_membuf(&ctx->cipher, &ctx->cipher_len, p)) + return 0; + ctx->cipher_len = strlen((char *)ctx->cipher); + } + + return 1; +} + +static const OSSL_PARAM *kdf_wbsm4_settable_ctx_params(ossl_unused void *ctx, + ossl_unused void *p_ctx) +{ + static const OSSL_PARAM known_settable_ctx_params[] = { + OSSL_PARAM_utf8_string(OSSL_KDF_PARAM_KEY, NULL, 0), + 
OSSL_PARAM_utf8_string(OSSL_KDF_PARAM_CIPHER, NULL, 0), + OSSL_PARAM_END}; + return known_settable_ctx_params; +} + +static int kdf_wbsm4_get_ctx_params(void *vctx, OSSL_PARAM params[]) +{ + KDF_WBSM4 *ctx = (KDF_WBSM4 *)vctx; + + OSSL_PARAM *p; + size_t keylen = 0; + + if ((p = OSSL_PARAM_locate(params, OSSL_KDF_PARAM_SIZE)) != NULL) + { + if (ctx->cipher == NULL) + keylen = 0; + else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-XIAOLAI") == 0) + keylen = sizeof(wbsm4_xiaolai_key); + else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-BAIWU") == 0) + keylen = sizeof(wbsm4_baiwu_key); + else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-WSISE") == 0) + keylen = sizeof(wbsm4_wsise_key); + } + + if (keylen != 0) + return OSSL_PARAM_set_size_t(p, keylen); + + return -2; +} + +static const OSSL_PARAM *kdf_wbsm4_gettable_ctx_params(ossl_unused void *ctx, + ossl_unused void *p_ctx) +{ + static const OSSL_PARAM known_gettable_ctx_params[] = { + OSSL_PARAM_size_t(OSSL_KDF_PARAM_SIZE, NULL), + OSSL_PARAM_END}; + return known_gettable_ctx_params; +} + +const OSSL_DISPATCH ossl_kdf_wbsm4_functions[] = { + {OSSL_FUNC_KDF_NEWCTX, (void (*)(void))kdf_wbsm4_new}, + {OSSL_FUNC_KDF_FREECTX, (void (*)(void))kdf_wbsm4_free}, + {OSSL_FUNC_KDF_RESET, (void (*)(void))kdf_wbsm4_reset}, + {OSSL_FUNC_KDF_DERIVE, (void (*)(void))kdf_wbsm4_derive}, + {OSSL_FUNC_KDF_SETTABLE_CTX_PARAMS, + (void (*)(void))kdf_wbsm4_settable_ctx_params}, + {OSSL_FUNC_KDF_SET_CTX_PARAMS, (void (*)(void))kdf_wbsm4_set_ctx_params}, + {OSSL_FUNC_KDF_GETTABLE_CTX_PARAMS, + (void (*)(void))kdf_wbsm4_gettable_ctx_params}, + {OSSL_FUNC_KDF_GET_CTX_PARAMS, (void (*)(void))kdf_wbsm4_get_ctx_params}, + {0, NULL}}; +#endif \ No newline at end of file diff --git a/test/build.info b/test/build.info index a03023f4a..aafcc74fe 100644 --- a/test/build.info +++ b/test/build.info @@ -620,6 +620,9 @@ IF[{- !$disabled{tests} -}] IF[{- !$disabled{sm4} -}] PROGRAMS{noinst}=sm4_internal_test ENDIF + IF[{- 
!$disabled{wbsm4} -}] + PROGRAMS{noinst}=wbsm4_internal_test + ENDIF IF[{- !$disabled{zuc} -}] PROGRAMS{noinst}=zuc_internal_test ENDIF @@ -768,6 +771,10 @@ IF[{- !$disabled{tests} -}] INCLUDE[sm4_internal_test]=.. ../include ../apps/include DEPEND[sm4_internal_test]=../libcrypto.a libtestutil.a + SOURCE[wbsm4_internal_test]=wbsm4_internal_test.c + INCLUDE[wbsm4_internal_test]=.. ../include ../apps/include + DEPEND[wbsm4_internal_test]=../libcrypto.a libtestutil.a + SOURCE[zuc_internal_test]=zuc_internal_test.c INCLUDE[zuc_internal_test]=.. ../include ../apps/include ../crypto/include DEPEND[zuc_internal_test]=../libcrypto.a libtestutil.a diff --git a/test/recipes/03-test_internal_wbsm4.t b/test/recipes/03-test_internal_wbsm4.t new file mode 100644 index 000000000..bdf3541e7 --- /dev/null +++ b/test/recipes/03-test_internal_wbsm4.t @@ -0,0 +1,18 @@ +#! /usr/bin/env perl +# Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. +# Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved. +# Copyright 2017 [Ribose Inc.](https://www.ribose.com). All Rights Reserved. +# +# Licensed under the Apache License 2.0 (the "License"). You may not use +# this file except in compliance with the License. You can obtain a copy +# in the file LICENSE in the source distribution or at +# https://www.openssl.org/source/license.html + +use strict; +use OpenSSL::Test; # get 'plan' +use OpenSSL::Test::Simple; +use OpenSSL::Test::Utils; + +setup("test_internal_wbsm4"); + +simple_test("test_internal_wbsm4", "wbsm4_internal_test", "wbsm4"); diff --git a/test/wbsm4_internal_test.c b/test/wbsm4_internal_test.c new file mode 100644 index 000000000..b8d10711f --- /dev/null +++ b/test/wbsm4_internal_test.c @@ -0,0 +1,416 @@ +/* + * Copyright 2024 The Tongsuo Project Authors. All Rights Reserved. + * Copyright 2017-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright 2017 Ribose Inc. All Rights Reserved. 
+ * + * Licensed under the Apache License 2.0 (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +/* + * Internal tests for the WBSM4 module. + */ + +#include +#include +#include +#include "testutil.h" + +#ifndef OPENSSL_NO_WBSM4 +#include "crypto/sm4.h" +#include "crypto/wbsm4.h" + +static int test_wbsm4_Xiao_Lai(void) +{ + static const uint8_t k[SM4_BLOCK_SIZE] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}; + + static const uint8_t input[SM4_BLOCK_SIZE] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}; + + /* + * This test vector comes from Example 1 of GB/T 32907-2016, + * and described in Internet Draft draft-ribose-cfrg-sm4-02. + */ + static const uint8_t expected[SM4_BLOCK_SIZE] = { + 0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e, + 0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46}; + + /* + * This test vector comes from Example 2 from GB/T 32907-2016, + * and described in Internet Draft draft-ribose-cfrg-sm4-02. + * After 1,000,000 iterations. 
+ */ + static const uint8_t expected_iter[SM4_BLOCK_SIZE] = { + 0x59, 0x52, 0x98, 0xc7, 0xc6, 0xfd, 0x27, 0x1f, + 0x04, 0x02, 0xf8, 0x04, 0xc3, 0x3d, 0x3f, 0x66}; + + wbsm4_xiaolai_key *wbsm4_key = (wbsm4_xiaolai_key *)malloc(sizeof(wbsm4_xiaolai_key)); + if (wbsm4_key == NULL) + return 0; + memset(wbsm4_key, 0, sizeof(wbsm4_xiaolai_key)); + + uint8_t block[SM4_BLOCK_SIZE]; + + wbsm4_xiaolai_gen(k, wbsm4_key); + + memcpy(block, input, SM4_BLOCK_SIZE); + wbsm4_xiaolai_encrypt(block, block, wbsm4_key); + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + free(wbsm4_key); + return 0; + } + + unsigned char *keybuf = (unsigned char *)malloc(sizeof(wbsm4_xiaolai_key)); + if (!TEST_ptr_ne(keybuf, NULL)) + { + free(wbsm4_key); + return 0; + } + + wbsm4_xiaolai_export_key(wbsm4_key, keybuf); + wbsm4_xiaolai_set_key(keybuf, wbsm4_key); + + memcpy(block, input, SM4_BLOCK_SIZE); + wbsm4_xiaolai_encrypt(block, block, wbsm4_key); + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + // int i; + // for (i = 0; i != 999999; ++i) + // wbsm4_xiaolai_encrypt(block, block, wbsm4_key); + + // if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected_iter, SM4_BLOCK_SIZE)) + // return 0; + // return 1; + (void)expected_iter; + + const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-XIAOLAI-ECB"); + if (!TEST_ptr_ne(cipher, NULL)) + { + free(wbsm4_key); + return 0; + } + int key_length = EVP_CIPHER_get_key_length(cipher); + if (!TEST_int_eq(key_length, sizeof(wbsm4_xiaolai_key))) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new(); + if (!TEST_ptr_ne(cipher_ctx, NULL)) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + int ret = EVP_EncryptInit(cipher_ctx, cipher, (unsigned char *)keybuf, NULL); + if (!TEST_int_eq(ret, 1)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + + int outl = 
SM4_BLOCK_SIZE; + memcpy(block, input, SM4_BLOCK_SIZE); + ret = EVP_EncryptUpdate(cipher_ctx, block, &outl, block, SM4_BLOCK_SIZE); + if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 1; +} + +static int test_wbsm4_Bai_Wu(void) +{ + static const uint8_t k[SM4_BLOCK_SIZE] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}; + + static const uint8_t input[SM4_BLOCK_SIZE] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}; + + /* + * This test vector comes from Example 1 of GB/T 32907-2016, + * and described in Internet Draft draft-ribose-cfrg-sm4-02. + */ + static const uint8_t expected[SM4_BLOCK_SIZE] = { + 0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e, + 0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46}; + + /* + * This test vector comes from Example 2 from GB/T 32907-2016, + * and described in Internet Draft draft-ribose-cfrg-sm4-02. + * After 1,000,000 iterations. 
+ */ + static const uint8_t expected_iter[SM4_BLOCK_SIZE] = { + 0x59, 0x52, 0x98, 0xc7, 0xc6, 0xfd, 0x27, 0x1f, + 0x04, 0x02, 0xf8, 0x04, 0xc3, 0x3d, 0x3f, 0x66}; + + wbsm4_baiwu_key *wbsm4_key = (wbsm4_baiwu_key *)malloc(sizeof(wbsm4_baiwu_key)); + if (wbsm4_key == NULL) + return 0; + memset(wbsm4_key, 0, sizeof(wbsm4_baiwu_key)); + + uint8_t block[SM4_BLOCK_SIZE]; + + wbsm4_baiwu_gen(k, wbsm4_key); + + memcpy(block, input, SM4_BLOCK_SIZE); + wbsm4_baiwu_encrypt(block, block, wbsm4_key); + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + free(wbsm4_key); + return 0; + } + + unsigned char *keybuf = (unsigned char *)malloc(sizeof(wbsm4_baiwu_key)); + if (!TEST_ptr_ne(keybuf, NULL)) + { + free(wbsm4_key); + return 0; + } + + wbsm4_baiwu_export_key(wbsm4_key, keybuf); + wbsm4_baiwu_set_key(keybuf, wbsm4_key); + + memcpy(block, input, SM4_BLOCK_SIZE); + wbsm4_baiwu_encrypt(block, block, wbsm4_key); + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + // int i; + // for (i = 0; i != 999999; ++i) + // wbsm4_baiwu_encrypt(block, block, wbsm4_key); + + // if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected_iter, SM4_BLOCK_SIZE)) + // return 0; + // return 1; + (void)expected_iter; + + const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-BAIWU-ECB"); + if (!TEST_ptr_ne(cipher, NULL)) + { + free(wbsm4_key); + return 0; + } + int key_length = EVP_CIPHER_get_key_length(cipher); + if (!TEST_int_eq(key_length, sizeof(wbsm4_baiwu_key))) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new(); + if (!TEST_ptr_ne(cipher_ctx, NULL)) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + int ret = EVP_EncryptInit(cipher_ctx, cipher, (unsigned char *)keybuf, NULL); + if (!TEST_int_eq(ret, 1)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + + int outl = SM4_BLOCK_SIZE; + memcpy(block, 
input, SM4_BLOCK_SIZE); + ret = EVP_EncryptUpdate(cipher_ctx, block, &outl, block, SM4_BLOCK_SIZE); + if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 1; +} + +static int test_wbsm4_WSISE(void) +{ + static const uint8_t k[SM4_BLOCK_SIZE] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}; + + static const uint8_t input[SM4_BLOCK_SIZE] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}; + + /* + * This test vector comes from Example 1 of GB/T 32907-2016, + * and described in Internet Draft draft-ribose-cfrg-sm4-02. + */ + static const uint8_t expected[SM4_BLOCK_SIZE] = { + 0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e, + 0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46}; + + /* + * This test vector comes from Example 2 from GB/T 32907-2016, + * and described in Internet Draft draft-ribose-cfrg-sm4-02. + * After 1,000,000 iterations. 
+ */ + static const uint8_t expected_iter[SM4_BLOCK_SIZE] = { + 0x59, 0x52, 0x98, 0xc7, 0xc6, 0xfd, 0x27, 0x1f, + 0x04, 0x02, 0xf8, 0x04, 0xc3, 0x3d, 0x3f, 0x66}; + + wbsm4_wsise_key *wbsm4_key = (wbsm4_wsise_key *)malloc(sizeof(wbsm4_wsise_key)); + if (wbsm4_key == NULL) + return 0; + memset(wbsm4_key, 0, sizeof(wbsm4_wsise_key)); + + uint8_t block[SM4_BLOCK_SIZE]; + + wbsm4_wsise_gen(k, wbsm4_key); + + memcpy(block, input, SM4_BLOCK_SIZE); + wbsm4_wsise_encrypt(block, block, wbsm4_key); + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + free(wbsm4_key); + return 0; + } + + unsigned char *keybuf = (unsigned char *)malloc(sizeof(wbsm4_wsise_key)); + if (!TEST_ptr_ne(keybuf, NULL)) + { + free(wbsm4_key); + return 0; + } + + wbsm4_wsise_export_key(wbsm4_key, keybuf); + wbsm4_wsise_set_key(keybuf, wbsm4_key); + + memcpy(block, input, SM4_BLOCK_SIZE); + wbsm4_wsise_encrypt(block, block, wbsm4_key); + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + // int i; + // for (i = 0; i != 999999; ++i) + // wbsm4_wsise_encrypt(block, block, wbsm4_key); + + // if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected_iter, SM4_BLOCK_SIZE)) + // return 0; + // return 1; + (void)expected_iter; + + const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-WSISE-ECB"); + if (!TEST_ptr_ne(cipher, NULL)) + { + free(wbsm4_key); + return 0; + } + int key_length = EVP_CIPHER_get_key_length(cipher); + if (!TEST_int_eq(key_length, sizeof(wbsm4_wsise_key))) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new(); + if (!TEST_ptr_ne(cipher_ctx, NULL)) + { + free(wbsm4_key); + free(keybuf); + return 0; + } + + int ret = EVP_EncryptInit(cipher_ctx, cipher, (unsigned char *)keybuf, NULL); + if (!TEST_int_eq(ret, 1)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + + int outl = SM4_BLOCK_SIZE; + memcpy(block, 
input, SM4_BLOCK_SIZE); + ret = EVP_EncryptUpdate(cipher_ctx, block, &outl, block, SM4_BLOCK_SIZE); + if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) + { + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 0; + } + + EVP_CIPHER_CTX_free(cipher_ctx); + free(wbsm4_key); + free(keybuf); + return 1; +} + +#endif + +int setup_tests(void) +{ +#ifndef OPENSSL_NO_WBSM4 + ADD_TEST(test_wbsm4_Xiao_Lai); + ADD_TEST(test_wbsm4_Bai_Wu); + ADD_TEST(test_wbsm4_WSISE); +#endif + return 1; +} From e923ab140f8db66ab37c0874b58a39007c0474ce Mon Sep 17 00:00:00 2001 From: zhsnew Date: Wed, 13 Nov 2024 16:45:03 +0800 Subject: [PATCH 2/3] wbsm4: add newline, fill libcrypto.num --- providers/implementations/kdfs/wbsm4kdf.c | 2 +- util/libcrypto.num | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/providers/implementations/kdfs/wbsm4kdf.c b/providers/implementations/kdfs/wbsm4kdf.c index 625078490..ff5335579 100644 --- a/providers/implementations/kdfs/wbsm4kdf.c +++ b/providers/implementations/kdfs/wbsm4kdf.c @@ -316,4 +316,4 @@ const OSSL_DISPATCH ossl_kdf_wbsm4_functions[] = { (void (*)(void))kdf_wbsm4_gettable_ctx_params}, {OSSL_FUNC_KDF_GET_CTX_PARAMS, (void (*)(void))kdf_wbsm4_get_ctx_params}, {0, NULL}}; -#endif \ No newline at end of file +#endif diff --git a/util/libcrypto.num b/util/libcrypto.num index 2602396b4..96b29dabf 100644 --- a/util/libcrypto.num +++ b/util/libcrypto.num @@ -5697,3 +5697,24 @@ TSAPI_SM2Sign 6013 3_0_3 EXIST::FUNCTION:SM2,SM3 TSAPI_SM2Verify 6014 3_0_3 EXIST::FUNCTION:SM2,SM3 TSAPI_SM2Decrypt 6015 3_0_3 EXIST::FUNCTION:SM2 TSAPI_SDF_GenerateRandom 6016 3_0_3 EXIST::FUNCTION: +EVP_wbsm4_xiaolai_ecb 6017 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_xiaolai_cbc 6018 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_xiaolai_cfb128 6019 3_0_3 
EXIST::FUNCTION:WBSM4 +EVP_wbsm4_xiaolai_ofb 6020 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_xiaolai_ctr 6021 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_xiaolai_gcm 6022 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_xiaolai_ccm 6023 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_ecb 6024 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_cbc 6025 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_cfb128 6026 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_ofb 6027 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_ctr 6028 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_gcm 6029 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_baiwu_ccm 6030 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_ecb 6031 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_cbc 6032 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_cfb128 6033 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_ofb 6034 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_ctr 6035 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_gcm 6036 3_0_3 EXIST::FUNCTION:WBSM4 +EVP_wbsm4_wsise_ccm 6037 3_0_3 EXIST::FUNCTION:WBSM4 From 77cea54ac6951bee974e3fc59fd70e738e8ea1ae Mon Sep 17 00:00:00 2001 From: zhsnew Date: Fri, 15 Nov 2024 08:57:03 +0800 Subject: [PATCH 3/3] wbsm4: format code --- apps/enc.c | 9 +- apps/speed.c | 36 +- crypto/evp/e_wbsm4_baiwu.c | 70 +- crypto/evp/e_wbsm4_wsise.c | 76 +- crypto/evp/e_wbsm4_xiaolai.c | 80 +- crypto/sm4/build.info | 2 +- crypto/sm4/wb/Bai-Wu-wbsm4.c | 109 +- crypto/sm4/wb/WBMatrix.c | 2750 +++++++---------- crypto/sm4/wb/WBMatrix.h | 40 +- crypto/sm4/wb/WBRandom.h | 2 +- crypto/sm4/wb/WSISE-wbsm4.c | 87 +- crypto/sm4/wb/Xiao-Lai-wbsm4.c | 73 +- crypto/sm4/wb/wbsm4.c | 14 +- include/crypto/wbsm4.h | 23 +- include/crypto/wbstructure.h | 79 +- .../implementations/ciphers/cipher_wbsm4.c | 60 +- .../implementations/ciphers/cipher_wbsm4.h | 24 +- .../ciphers/cipher_wbsm4_ccm.c | 15 +- .../ciphers/cipher_wbsm4_ccm.h | 24 +- .../ciphers/cipher_wbsm4_ccm_hw.c | 19 +- .../ciphers/cipher_wbsm4_gcm.c | 6 +- .../ciphers/cipher_wbsm4_gcm.h | 24 +- 
.../ciphers/cipher_wbsm4_gcm_hw.c | 85 +- .../implementations/ciphers/cipher_wbsm4_hw.c | 112 +- providers/implementations/kdfs/wbsm4kdf.c | 92 +- test/wbsm4_internal_test.c | 144 +- 26 files changed, 1672 insertions(+), 2383 deletions(-) diff --git a/apps/enc.c b/apps/enc.c index b04c22359..5f304b32a 100644 --- a/apps/enc.c +++ b/apps/enc.c @@ -367,8 +367,7 @@ int enc_main(int argc, char **argv) #ifndef OPENSSL_NO_WBSM4 if (rawkey != NULL) { - if (cipher != NULL && rawkeylen != EVP_CIPHER_key_length(cipher)) - { + if (cipher != NULL && rawkeylen != EVP_CIPHER_key_length(cipher)) { BIO_printf(bio_err, "invalid raw key length: %d, need: %d\n", rawkeylen, EVP_CIPHER_key_length(cipher)); goto end; @@ -598,8 +597,7 @@ int enc_main(int argc, char **argv) #ifndef OPENSSL_NO_WBSM4 if (rawkey) { - if (!EVP_CipherInit_ex(ctx, NULL, NULL, rawkey, iv, enc)) - { + if (!EVP_CipherInit_ex(ctx, NULL, NULL, rawkey, iv, enc)) { BIO_printf(bio_err, "Error setting cipher %s\n", EVP_CIPHER_get0_name(cipher)); ERR_print_errors(bio_err); @@ -628,8 +626,7 @@ int enc_main(int argc, char **argv) printf("\n"); } #ifndef OPENSSL_NO_WBSM4 - if (rawkey) - { + if (rawkey) { printf("key="); for (i = 0; i < EVP_CIPHER_get_key_length(cipher) && i < 32; i++) printf("%02X", rawkey[i]); diff --git a/apps/speed.c b/apps/speed.c index af81d9555..a46127274 100644 --- a/apps/speed.c +++ b/apps/speed.c @@ -3129,11 +3129,9 @@ int speed_main(int argc, char **argv) } #endif #ifndef OPENSSL_NO_WBSM4 - for (k = 0; k < 2; k++) - { + for (k = 0; k < 2; k++) { algindex = D_CBC_WBSM4_XIAOLAI + k; - if (doit[algindex]) - { + if (doit[algindex]) { int st = 1; const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-XIAOLAI"); @@ -3146,16 +3144,14 @@ int speed_main(int argc, char **argv) continue; RAND_bytes(local_key, keylen); - for (i = 0; st && i < loopargs_len; i++) - { + for (i = 0; st && i < loopargs_len; i++) { loopargs[i].ctx = init_evp_cipher_ctx(names[algindex], local_key, keylen); st = loopargs[i].ctx != 
NULL; } OPENSSL_free(local_key); - for (testnum = 0; st && testnum < size_num; testnum++) - { + for (testnum = 0; st && testnum < size_num; testnum++) { print_message(names[algindex], c[algindex][testnum], lengths[testnum], seconds.sym); Time_F(START); @@ -3168,11 +3164,9 @@ int speed_main(int argc, char **argv) EVP_CIPHER_CTX_free(loopargs[i].ctx); } } - for (k = 0; k < 2; k++) - { + for (k = 0; k < 2; k++) { algindex = D_CBC_WBSM4_BAIWU + k; - if (doit[algindex]) - { + if (doit[algindex]) { int st = 1; const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-BAIWU"); @@ -3185,16 +3179,14 @@ int speed_main(int argc, char **argv) continue; RAND_bytes(local_key, keylen); - for (i = 0; st && i < loopargs_len; i++) - { + for (i = 0; st && i < loopargs_len; i++) { loopargs[i].ctx = init_evp_cipher_ctx(names[algindex], local_key, keylen); st = loopargs[i].ctx != NULL; } OPENSSL_free(local_key); - for (testnum = 0; st && testnum < size_num; testnum++) - { + for (testnum = 0; st && testnum < size_num; testnum++) { print_message(names[algindex], c[algindex][testnum], lengths[testnum], seconds.sym); Time_F(START); @@ -3207,11 +3199,9 @@ int speed_main(int argc, char **argv) EVP_CIPHER_CTX_free(loopargs[i].ctx); } } - for (k = 0; k < 2; k++) - { + for (k = 0; k < 2; k++) { algindex = D_CBC_WBSM4_WSISE + k; - if (doit[algindex]) - { + if (doit[algindex]) { int st = 1; const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-WSISE"); @@ -3224,16 +3214,14 @@ int speed_main(int argc, char **argv) continue; RAND_bytes(local_key, keylen); - for (i = 0; st && i < loopargs_len; i++) - { + for (i = 0; st && i < loopargs_len; i++) { loopargs[i].ctx = init_evp_cipher_ctx(names[algindex], local_key, keylen); st = loopargs[i].ctx != NULL; } OPENSSL_free(local_key); - for (testnum = 0; st && testnum < size_num; testnum++) - { + for (testnum = 0; st && testnum < size_num; testnum++) { print_message(names[algindex], c[algindex][testnum], lengths[testnum], seconds.sym); Time_F(START); diff --git 
a/crypto/evp/e_wbsm4_baiwu.c b/crypto/evp/e_wbsm4_baiwu.c index 55766fde6..7270cbcf3 100644 --- a/crypto/evp/e_wbsm4_baiwu.c +++ b/crypto/evp/e_wbsm4_baiwu.c @@ -29,23 +29,28 @@ typedef struct { } EVP_WBSM4_BAIWU_KEY; # define BLOCK_CIPHER_generic(nid,blocksize,ivlen,nmode,mode,MODE,flags) \ -static const EVP_CIPHER wbsm4_baiwu_##mode = { \ - nid##_##nmode,blocksize,sizeof(wbsm4_baiwu_key),ivlen, \ - flags|EVP_CIPH_##MODE##_MODE, \ - EVP_ORIG_GLOBAL, \ - wbsm4_baiwu_init_key, \ - wbsm4_baiwu_##mode##_cipher, \ - NULL, \ - sizeof(EVP_WBSM4_BAIWU_KEY), \ - NULL,NULL,NULL,NULL }; \ -const EVP_CIPHER *EVP_wbsm4_baiwu_##mode(void) \ +static const EVP_CIPHER wbsm4_baiwu_##mode = { \ + nid##_##nmode,blocksize,sizeof(wbsm4_baiwu_key),ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_baiwu_init_key, \ + wbsm4_baiwu_##mode##_cipher, \ + NULL, \ + sizeof(EVP_WBSM4_BAIWU_KEY), \ + NULL,NULL,NULL,NULL \ +}; \ +const EVP_CIPHER *EVP_wbsm4_baiwu_##mode(void) \ { return &wbsm4_baiwu_##mode; } -#define DEFINE_BLOCK_CIPHERS(nid,flags) \ - BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ +#define DEFINE_BLOCK_CIPHERS(nid,flags) \ + BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ BLOCK_CIPHER_generic(nid,1,16,ctr,ctr,CTR,flags) static int wbsm4_baiwu_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, @@ -138,20 +143,21 @@ static int wbsm4_baiwu_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, 
DEFINE_BLOCK_CIPHERS(NID_wbsm4_baiwu, 0) # define BLOCK_CIPHER_custom(nid,blocksize,ivlen,mode,MODE,flags) \ -static const EVP_CIPHER wbsm4_baiwu_##mode = { \ - nid##_##mode,blocksize, sizeof(wbsm4_baiwu_key), ivlen, \ - flags|EVP_CIPH_##MODE##_MODE, \ - EVP_ORIG_GLOBAL, \ - wbsm4_baiwu_##mode##_init, \ - wbsm4_baiwu_##mode##_cipher, \ - wbsm4_baiwu_##mode##_cleanup, \ - sizeof(EVP_SM4_##MODE##_CTX), \ - NULL,NULL,wbsm4_baiwu_##mode##_ctrl,NULL }; \ -const EVP_CIPHER *EVP_wbsm4_baiwu_##mode(void) \ +static const EVP_CIPHER wbsm4_baiwu_##mode = { \ + nid##_##mode,blocksize, sizeof(wbsm4_baiwu_key), ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_baiwu_##mode##_init, \ + wbsm4_baiwu_##mode##_cipher, \ + wbsm4_baiwu_##mode##_cleanup, \ + sizeof(EVP_SM4_##MODE##_CTX), \ + NULL,NULL,wbsm4_baiwu_##mode##_ctrl,NULL \ +}; \ +const EVP_CIPHER *EVP_wbsm4_baiwu_##mode(void) \ { return &wbsm4_baiwu_##mode; } typedef struct { - wbsm4_baiwu_key ks; /* WBSM4 key schedule to use */ + wbsm4_baiwu_key ks; /* WBSM4 key schedule to use */ int key_set; /* Set if key initialized */ int iv_set; /* Set if an iv is set */ GCM128_CONTEXT gcm; @@ -164,7 +170,7 @@ typedef struct { } EVP_SM4_GCM_CTX; typedef struct { - wbsm4_baiwu_key ks; /* WBSM4 key schedule to use */ + wbsm4_baiwu_key ks; /* WBSM4 key schedule to use */ int key_set; /* Set if key initialized */ int iv_set; /* Set if an iv is set */ int tag_set; /* Set if tag is valid */ @@ -485,7 +491,8 @@ static int wbsm4_baiwu_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, return -1; } else if (ctx->encrypt) { if (gctx->ctr != NULL) { - if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, + gctx->ctr)) return -1; } else { if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, len)) @@ -493,7 +500,8 @@ static int wbsm4_baiwu_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, } } else { if (gctx->ctr != NULL) { - if 
(CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, + gctx->ctr)) return -1; } else { if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, len)) @@ -737,9 +745,9 @@ static int wbsm4_baiwu_ccm_cleanup(EVP_CIPHER_CTX *c) return 1; } -#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ +#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ - | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH) BLOCK_CIPHER_custom(NID_wbsm4_baiwu, 1, 12, gcm, GCM, diff --git a/crypto/evp/e_wbsm4_wsise.c b/crypto/evp/e_wbsm4_wsise.c index 7e853e4ab..ccdd6b22d 100644 --- a/crypto/evp/e_wbsm4_wsise.c +++ b/crypto/evp/e_wbsm4_wsise.c @@ -29,23 +29,28 @@ typedef struct { } EVP_WBSM4_WSISE_KEY; # define BLOCK_CIPHER_generic(nid,blocksize,ivlen,nmode,mode,MODE,flags) \ -static const EVP_CIPHER wbsm4_wsise_##mode = { \ - nid##_##nmode,blocksize,sizeof(wbsm4_wsise_key),ivlen, \ - flags|EVP_CIPH_##MODE##_MODE, \ - EVP_ORIG_GLOBAL, \ - wbsm4_wsise_init_key, \ - wbsm4_wsise_##mode##_cipher, \ - NULL, \ - sizeof(EVP_WBSM4_WSISE_KEY), \ - NULL,NULL,NULL,NULL }; \ -const EVP_CIPHER *EVP_wbsm4_wsise_##mode(void) \ +static const EVP_CIPHER wbsm4_wsise_##mode = { \ + nid##_##nmode,blocksize,sizeof(wbsm4_wsise_key),ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_wsise_init_key, \ + wbsm4_wsise_##mode##_cipher, \ + NULL, \ + sizeof(EVP_WBSM4_WSISE_KEY), \ + NULL,NULL,NULL,NULL \ +}; \ +const EVP_CIPHER *EVP_wbsm4_wsise_##mode(void) \ { return &wbsm4_wsise_##mode; } -#define DEFINE_BLOCK_CIPHERS(nid,flags) \ - BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - 
BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ +#define DEFINE_BLOCK_CIPHERS(nid,flags) \ + BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ BLOCK_CIPHER_generic(nid,1,16,ctr,ctr,CTR,flags) static int wbsm4_wsise_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, @@ -138,20 +143,21 @@ static int wbsm4_wsise_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, DEFINE_BLOCK_CIPHERS(NID_wbsm4_wsise, 0) # define BLOCK_CIPHER_custom(nid,blocksize,ivlen,mode,MODE,flags) \ -static const EVP_CIPHER wbsm4_wsise_##mode = { \ - nid##_##mode,blocksize, sizeof(wbsm4_wsise_key), ivlen, \ - flags|EVP_CIPH_##MODE##_MODE, \ - EVP_ORIG_GLOBAL, \ - wbsm4_wsise_##mode##_init, \ - wbsm4_wsise_##mode##_cipher, \ - wbsm4_wsise_##mode##_cleanup, \ - sizeof(EVP_SM4_##MODE##_CTX), \ - NULL,NULL,wbsm4_wsise_##mode##_ctrl,NULL }; \ -const EVP_CIPHER *EVP_wbsm4_wsise_##mode(void) \ +static const EVP_CIPHER wbsm4_wsise_##mode = { \ + nid##_##mode,blocksize, sizeof(wbsm4_wsise_key), ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_wsise_##mode##_init, \ + wbsm4_wsise_##mode##_cipher, \ + wbsm4_wsise_##mode##_cleanup, \ + sizeof(EVP_SM4_##MODE##_CTX), \ + NULL,NULL,wbsm4_wsise_##mode##_ctrl,NULL \ +}; \ +const EVP_CIPHER *EVP_wbsm4_wsise_##mode(void) \ { return &wbsm4_wsise_##mode; } typedef struct { - wbsm4_wsise_key ks; /* WBSM4 key schedule to use */ + wbsm4_wsise_key ks; /* WBSM4 key schedule to use */ int key_set; /* Set if key initialized */ int iv_set; /* Set if an iv is set */ GCM128_CONTEXT gcm; @@ -164,7 +170,7 @@ typedef struct { } EVP_SM4_GCM_CTX; typedef struct { - wbsm4_wsise_key ks; /* WBSM4 key schedule to use */ + wbsm4_wsise_key 
ks; /* WBSM4 key schedule to use */ int key_set; /* Set if key initialized */ int iv_set; /* Set if an iv is set */ int tag_set; /* Set if tag is valid */ @@ -175,14 +181,16 @@ typedef struct { ccm128_f str; } EVP_SM4_CCM_CTX; -static int wbsm4_wsise_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_wsise_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, + void *ptr); static int wbsm4_wsise_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); static int wbsm4_wsise_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t len); static int wbsm4_wsise_gcm_cleanup(EVP_CIPHER_CTX *c); -static int wbsm4_wsise_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_wsise_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, + void *ptr); static int wbsm4_wsise_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); static int wbsm4_wsise_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, @@ -485,7 +493,8 @@ static int wbsm4_wsise_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, return -1; } else if (ctx->encrypt) { if (gctx->ctr != NULL) { - if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, + gctx->ctr)) return -1; } else { if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, len)) @@ -493,7 +502,8 @@ static int wbsm4_wsise_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, } } else { if (gctx->ctr != NULL) { - if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, + gctx->ctr)) return -1; } else { if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, len)) @@ -737,9 +747,9 @@ static int wbsm4_wsise_ccm_cleanup(EVP_CIPHER_CTX *c) return 1; } -#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ +#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ | EVP_CIPH_CUSTOM_IV | 
EVP_CIPH_FLAG_CUSTOM_CIPHER \ - | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH) BLOCK_CIPHER_custom(NID_wbsm4_wsise, 1, 12, gcm, GCM, diff --git a/crypto/evp/e_wbsm4_xiaolai.c b/crypto/evp/e_wbsm4_xiaolai.c index 0c1830e9e..29a817957 100644 --- a/crypto/evp/e_wbsm4_xiaolai.c +++ b/crypto/evp/e_wbsm4_xiaolai.c @@ -29,23 +29,28 @@ typedef struct { } EVP_WBSM4_XIAOLAI_KEY; # define BLOCK_CIPHER_generic(nid,blocksize,ivlen,nmode,mode,MODE,flags) \ -static const EVP_CIPHER wbsm4_xiaolai_##mode = { \ - nid##_##nmode,blocksize,sizeof(wbsm4_xiaolai_key),ivlen, \ - flags|EVP_CIPH_##MODE##_MODE, \ - EVP_ORIG_GLOBAL, \ - wbsm4_xiaolai_init_key, \ - wbsm4_xiaolai_##mode##_cipher, \ - NULL, \ - sizeof(EVP_WBSM4_XIAOLAI_KEY), \ - NULL,NULL,NULL,NULL }; \ -const EVP_CIPHER *EVP_wbsm4_xiaolai_##mode(void) \ +static const EVP_CIPHER wbsm4_xiaolai_##mode = { \ + nid##_##nmode,blocksize,sizeof(wbsm4_xiaolai_key),ivlen, \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_xiaolai_init_key, \ + wbsm4_xiaolai_##mode##_cipher, \ + NULL, \ + sizeof(EVP_WBSM4_XIAOLAI_KEY), \ + NULL,NULL,NULL,NULL \ +}; \ +const EVP_CIPHER *EVP_wbsm4_xiaolai_##mode(void) \ { return &wbsm4_xiaolai_##mode; } -#define DEFINE_BLOCK_CIPHERS(nid,flags) \ - BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ - BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ +#define DEFINE_BLOCK_CIPHERS(nid,flags) \ + BLOCK_CIPHER_generic(nid,16,16,cbc,cbc,CBC, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,16,0,ecb,ecb,ECB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + BLOCK_CIPHER_generic(nid,1,16,ofb128,ofb,OFB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ + 
BLOCK_CIPHER_generic(nid,1,16,cfb128,cfb,CFB, \ + flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \ BLOCK_CIPHER_generic(nid,1,16,ctr,ctr,CTR,flags) static int wbsm4_xiaolai_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, @@ -138,20 +143,21 @@ static int wbsm4_xiaolai_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, DEFINE_BLOCK_CIPHERS(NID_wbsm4_xiaolai, 0) # define BLOCK_CIPHER_custom(nid,blocksize,ivlen,mode,MODE,flags) \ -static const EVP_CIPHER wbsm4_xiaolai_##mode = { \ +static const EVP_CIPHER wbsm4_xiaolai_##mode = { \ nid##_##mode,blocksize, sizeof(wbsm4_xiaolai_key), ivlen, \ - flags|EVP_CIPH_##MODE##_MODE, \ - EVP_ORIG_GLOBAL, \ - wbsm4_xiaolai_##mode##_init, \ - wbsm4_xiaolai_##mode##_cipher, \ - wbsm4_xiaolai_##mode##_cleanup, \ - sizeof(EVP_SM4_##MODE##_CTX), \ - NULL,NULL,wbsm4_xiaolai_##mode##_ctrl,NULL }; \ -const EVP_CIPHER *EVP_wbsm4_xiaolai_##mode(void) \ + flags|EVP_CIPH_##MODE##_MODE, \ + EVP_ORIG_GLOBAL, \ + wbsm4_xiaolai_##mode##_init, \ + wbsm4_xiaolai_##mode##_cipher, \ + wbsm4_xiaolai_##mode##_cleanup, \ + sizeof(EVP_SM4_##MODE##_CTX), \ + NULL,NULL,wbsm4_xiaolai_##mode##_ctrl,NULL \ +}; \ +const EVP_CIPHER *EVP_wbsm4_xiaolai_##mode(void) \ { return &wbsm4_xiaolai_##mode; } typedef struct { - wbsm4_xiaolai_key ks; /* WBSM4 key schedule to use */ + wbsm4_xiaolai_key ks; /* WBSM4 key schedule to use */ int key_set; /* Set if key initialized */ int iv_set; /* Set if an iv is set */ GCM128_CONTEXT gcm; @@ -164,7 +170,7 @@ typedef struct { } EVP_SM4_GCM_CTX; typedef struct { - wbsm4_xiaolai_key ks; /* WBSM4 key schedule to use */ + wbsm4_xiaolai_key ks; /* WBSM4 key schedule to use */ int key_set; /* Set if key initialized */ int iv_set; /* Set if an iv is set */ int tag_set; /* Set if tag is valid */ @@ -175,14 +181,16 @@ typedef struct { ccm128_f str; } EVP_SM4_CCM_CTX; -static int wbsm4_xiaolai_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_xiaolai_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, + void *ptr); 
static int wbsm4_xiaolai_gcm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); static int wbsm4_xiaolai_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t len); static int wbsm4_xiaolai_gcm_cleanup(EVP_CIPHER_CTX *c); -static int wbsm4_xiaolai_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr); +static int wbsm4_xiaolai_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, + void *ptr); static int wbsm4_xiaolai_ccm_init(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc); static int wbsm4_xiaolai_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, @@ -205,7 +213,8 @@ static void ctr64_inc(unsigned char *counter) } while (n); } -static int wbsm4_xiaolai_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +static int wbsm4_xiaolai_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, + void *ptr) { EVP_SM4_GCM_CTX *gctx = EVP_C_DATA(EVP_SM4_GCM_CTX,c); @@ -485,7 +494,8 @@ static int wbsm4_xiaolai_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, return -1; } else if (ctx->encrypt) { if (gctx->ctr != NULL) { - if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in, out, len, + gctx->ctr)) return -1; } else { if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, len)) @@ -493,7 +503,8 @@ static int wbsm4_xiaolai_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, } } else { if (gctx->ctr != NULL) { - if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, gctx->ctr)) + if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in, out, len, + gctx->ctr)) return -1; } else { if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, len)) @@ -534,7 +545,8 @@ static int wbsm4_xiaolai_gcm_cleanup(EVP_CIPHER_CTX *c) return 1; } -static int wbsm4_xiaolai_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) +static int wbsm4_xiaolai_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, + void *ptr) { EVP_SM4_CCM_CTX *cctx = 
EVP_C_DATA(EVP_SM4_CCM_CTX,c); @@ -737,9 +749,9 @@ static int wbsm4_xiaolai_ccm_cleanup(EVP_CIPHER_CTX *c) return 1; } -#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ +#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \ | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \ - | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ + | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \ | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH) BLOCK_CIPHER_custom(NID_wbsm4_xiaolai, 1, 12, gcm, GCM, diff --git a/crypto/sm4/build.info b/crypto/sm4/build.info index 33fa0e3e5..1f549a09f 100644 --- a/crypto/sm4/build.info +++ b/crypto/sm4/build.info @@ -15,7 +15,7 @@ ENDIF SOURCE[../../libcrypto]= $SM4ASM sm4.c IF[{- !$disabled{wbsm4} -}] -SOURCE[../../libcrypto]= ${SOURCE[../../libcrypto]} \ +SOURCE[../../libcrypto]=${SOURCE[../../libcrypto]} \ wb/wbsm4.c \ wb/Bai-Wu-wbsm4.c \ wb/Xiao-Lai-wbsm4.c \ diff --git a/crypto/sm4/wb/Bai-Wu-wbsm4.c b/crypto/sm4/wb/Bai-Wu-wbsm4.c index e211c46f6..dc46e8e81 100644 --- a/crypto/sm4/wb/Bai-Wu-wbsm4.c +++ b/crypto/sm4/wb/Bai-Wu-wbsm4.c @@ -26,7 +26,7 @@ (ct)[2] = (uint8_t)((st) >> 8); \ (ct)[3] = (uint8_t)(st) -static uint8_t SBOX[256]={ +static uint8_t SBOX[256] = { 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, @@ -106,8 +106,7 @@ void wbsm4_baiwu_set_key(const uint8_t *key, wbsm4_baiwu_key *wbsm4_key) uint8_t *p = (uint8_t *)wbsm4_key; uint8_t *end = p + sizeof(wbsm4_baiwu_key); - while (p < end) - { + while (p < end) { uint8_t t; t = p[0]; p[0] = p[3]; @@ -132,8 +131,7 @@ void wbsm4_baiwu_export_key(const wbsm4_baiwu_key *wbsm4_key, uint8_t *key) uint8_t *p = (uint8_t *)out; uint8_t *end = p + sizeof(wbsm4_baiwu_key); - while (p < end) - { + while (p < end) { uint8_t t; t = p[0]; p[0] = p[3]; @@ -152,7 +150,8 @@ void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key) int i, j, r, x, y; uint8_t temp_u8_x, temp_u8_y, temp_u8; 
uint32_t temp_u32; - uint32_t TD0_u32[6], TD1_u32[6], TD2_u32[3], TR_u32[4], Lc[36], Ec0[32], Ec1[32]; + uint32_t TD0_u32[6], TD1_u32[6], TD2_u32[3], TR_u32[4], Lc[36]; + uint32_t Ec0[32], Ec1[32]; M32 L[36]; M32 L_inv[36]; M8 E[32][2][4]; @@ -165,22 +164,21 @@ void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key) uint32_t SK[32]; wbsm4_sm4_setkey(SK, sm4_key); - for (r = 0; r < 36; r++) - { + for (r = 0; r < 36; r++) { genMatpairM32(&L[r], &L_inv[r]); Lc[r] = cus_random(); } - for (r = 0; r < 32; r++) - { - for (j = 0; j < 4; j++) - { + for (r = 0; r < 32; r++) { + for (j = 0; j < 4; j++) { genMatpairM8(&E[r][0][j], &E_inv[r][0][j]); genMatpairM8(&E[r][1][j], &E_inv[r][1][j]); } - MatrixcomM8to32(E_inv[r][0][0], E_inv[r][0][1], E_inv[r][0][2], E_inv[r][0][3], &Ei_inv[r][0]); - MatrixcomM8to32(E_inv[r][1][0], E_inv[r][1][1], E_inv[r][1][2], E_inv[r][1][3], &Ei_inv[r][1]); + MatrixcomM8to32(E_inv[r][0][0], E_inv[r][0][1], E_inv[r][0][2], + E_inv[r][0][3], &Ei_inv[r][0]); + MatrixcomM8to32(E_inv[r][1][0], E_inv[r][1][1], E_inv[r][1][2], + E_inv[r][1][3], &Ei_inv[r][1]); MatMulMatM32(Ei_inv[r][0], L_inv[r + 1], &M[r][0][0]); MatMulMatM32(Ei_inv[r][0], L_inv[r + 2], &M[r][0][1]); @@ -197,80 +195,77 @@ void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key) { MatMulMatM32(L[r + 4], L_matrix, &LL); - for (i = 0; i < 6; i++) - { + for (i = 0; i < 6; i++) { TD0_u32[i] = cus_random(); TD1_u32[i] = cus_random(); } - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { TR_u32[i] = cus_random(); } - for (i = 0; i < 3; i++) - { + for (i = 0; i < 3; i++) { TD2_u32[i] = cus_random(); } - Ec0[r] = TD0_u32[0] ^ TD0_u32[1] ^ TD0_u32[2] ^ TD0_u32[3] ^ TD0_u32[4] ^ TD0_u32[5]; - Ec1[r] = TD1_u32[0] ^ TD1_u32[1] ^ TD1_u32[2] ^ TD1_u32[3] ^ TD1_u32[4] ^ TD1_u32[5]; + Ec0[r] = TD0_u32[0] ^ TD0_u32[1] ^ TD0_u32[2] ^ TD0_u32[3] ^ + TD0_u32[4] ^ TD0_u32[5]; + Ec1[r] = TD1_u32[0] ^ TD1_u32[1] ^ TD1_u32[2] ^ TD1_u32[3] ^ + TD1_u32[4] ^ TD1_u32[5]; - for 
(x = 0; x < 256; x++) - { - for (j = 0; j < 4; j++) - { + for (x = 0; x < 256; x++) { + for (j = 0; j < 4; j++) { temp_u8 = x ^ ((Lc[r] >> (24 - j * 8)) & 0xff); temp_u32 = temp_u8 << (24 - j * 8); wbsm4_key->TD[r][0][j][x] = MatMulNumM32(C[r], temp_u32); } - for (j = 0; j < 3; j++) - { + for (j = 0; j < 3; j++) { wbsm4_key->TD[r][0][j][x] ^= TD2_u32[j]; } - wbsm4_key->TD[r][0][3][x] ^= Lc[r + 4] ^ TD2_u32[0] ^ TD2_u32[1] ^ TD2_u32[2] ^ TR_u32[0] ^ TR_u32[1] ^ TR_u32[2] ^ TR_u32[3]; + wbsm4_key->TD[r][0][3][x] ^= Lc[r + 4] ^ TD2_u32[0] ^ TD2_u32[1] ^ + TD2_u32[2] ^ TR_u32[0] ^ TR_u32[1] ^ + TR_u32[2] ^ TR_u32[3]; - for (i = 1; i < 4; i++) - { + for (i = 1; i < 4; i++) { temp_u8 = x ^ ((Lc[r + i] >> 24) & 0xff); temp_u32 = temp_u8 << 24; - wbsm4_key->TD[r][i][0][x] = MatMulNumM32(M[r][0][i - 1], temp_u32); + wbsm4_key->TD[r][i][0][x] = MatMulNumM32(M[r][0][i - 1], + temp_u32); temp_u8 = x ^ ((Lc[r + i] >> 16) & 0xff); temp_u32 = temp_u8 << 16; - wbsm4_key->TD[r][i][1][x] = MatMulNumM32(M[r][0][i - 1], temp_u32); + wbsm4_key->TD[r][i][1][x] = MatMulNumM32(M[r][0][i - 1], + temp_u32); temp_u8 = x ^ ((Lc[r + i] >> 8) & 0xff); temp_u32 = temp_u8 << 8; - wbsm4_key->TD[r][i][2][x] = MatMulNumM32(M[r][1][i - 1], temp_u32); + wbsm4_key->TD[r][i][2][x] = MatMulNumM32(M[r][1][i - 1], + temp_u32); temp_u8 = x ^ (Lc[r + i] & 0xff); temp_u32 = temp_u8; - wbsm4_key->TD[r][i][3][x] = MatMulNumM32(M[r][1][i - 1], temp_u32); + wbsm4_key->TD[r][i][3][x] = MatMulNumM32(M[r][1][i - 1], + temp_u32); } j = 0; - for (i = 1; i < 4; i++) - { + for (i = 1; i < 4; i++) { wbsm4_key->TD[r][i][0][x] ^= TD0_u32[j++]; wbsm4_key->TD[r][i][1][x] ^= TD0_u32[j++]; } j = 0; - for (i = 1; i < 4; i++) - { + for (i = 1; i < 4; i++) { wbsm4_key->TD[r][i][2][x] ^= TD1_u32[j++]; wbsm4_key->TD[r][i][3][x] ^= TD1_u32[j++]; } } - for (x = 0; x < 256; x++) - { - for (y = 0; y < 256; y++) - { - for (j = 0; j < 4; j++) - { + for (x = 0; x < 256; x++) { + for (y = 0; y < 256; y++) { + for (j = 0; j < 4; j++) { 
temp_u8_x = x ^ ((Ec0[r] >> (24 - j * 8)) & 0xff); temp_u8_x = MatMulNumM8(E[r][0][j], temp_u8_x); temp_u8_y = y ^ ((Ec1[r] >> (24 - j * 8)) & 0xff); temp_u8_y = MatMulNumM8(E[r][1][j], temp_u8_y); - temp_u8 = SBOX[temp_u8_x ^ temp_u8_y ^ ((SK[r] >> (24 - j * 8)) & 0xff)]; + temp_u8 = SBOX[temp_u8_x ^ temp_u8_y ^ + ((SK[r] >> (24 - j * 8)) & 0xff)]; temp_u32 = temp_u8 << (24 - j * 8); wbsm4_key->TR[r][j][x][y] = MatMulNumM32(LL, temp_u32); wbsm4_key->TR[r][j][x][y] ^= TR_u32[j]; @@ -279,9 +274,8 @@ void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key) } } - // external encoding - for (i = 0; i < 4; i++) - { + /* external encoding */ + for (i = 0; i < 4; i++) { wbsm4_key->SE[i].Mat = L[i]; wbsm4_key->SE[i].Vec.V = Lc[i]; @@ -290,7 +284,8 @@ void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key) } } -void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_baiwu_key *wbsm4_key) +void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], + const wbsm4_baiwu_key *wbsm4_key) { int r, i, j; uint32_t x[36]; @@ -305,22 +300,20 @@ void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], const wb x[2] = affineU32(wbsm4_key->SE[2], x[2]); x[3] = affineU32(wbsm4_key->SE[3], x[3]); - for (r = 0; r < 32; r++) - { + for (r = 0; r < 32; r++) { x[r + 4] = 0; s0 = 0; s1 = 0; - for (i = 1; i < 4; i++) - { + for (i = 1; i < 4; i++) { s0 ^= wbsm4_key->TD[r][i][0][(x[r + i] >> 24) & 0xff]; s0 ^= wbsm4_key->TD[r][i][1][(x[r + i] >> 16) & 0xff]; s1 ^= wbsm4_key->TD[r][i][2][(x[r + i] >> 8) & 0xff]; s1 ^= wbsm4_key->TD[r][i][3][x[r + i] & 0xff]; } - for (j = 0; j < 4; j++) - { - x[r + 4] ^= wbsm4_key->TR[r][j][(s0 >> (24 - j * 8)) & 0xff][(s1 >> (24 - j * 8)) & 0xff]; + for (j = 0; j < 4; j++) { + x[r + 4] ^= wbsm4_key->TR[r][j][(s0 >> (24 - j * 8)) & + 0xff][(s1 >> (24 - j * 8)) & 0xff]; x[r + 4] ^= wbsm4_key->TD[r][0][j][(x[r] >> (24 - j * 8)) & 0xff]; } } @@ -333,4 +326,4 @@ void 
wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], const wb PUT32(x[34], OUT + 4); PUT32(x[33], OUT + 8); PUT32(x[32], OUT + 12); -} \ No newline at end of file +} diff --git a/crypto/sm4/wb/WBMatrix.c b/crypto/sm4/wb/WBMatrix.c index cd840b2e4..1f1a6d7c4 100644 --- a/crypto/sm4/wb/WBMatrix.c +++ b/crypto/sm4/wb/WBMatrix.c @@ -12,190 +12,205 @@ #include #include "WBMatrix.h" -// 8bit internal xor table -static int xor [] = {0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, - 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, - 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, - 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, - 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, - 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, - 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, - 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0}; - -// 8bit Hamming weight table -static int HW[] = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, - 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, - 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, - 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, - 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, - 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, - 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, - 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, - 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, - 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, - 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, - 6, 6, 7, 3, 4, 4, 5, 4, 
5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, - 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}; +/* 8bit internal xor table */ +static int xor [] = { + 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, + 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, + 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 +}; + +/* 8bit Hamming weight table */ +static int HW[] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, + 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, + 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, + 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, + 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, + 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, + 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, + 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, + 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, + 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, + 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, + 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, + 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 +}; static uint8_t idM4[4] = {0x08, 0x04, 0x02, 0x01}; static uint8_t idM8[8] = {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01}; -static uint16_t idM16[16] = {0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, 0x200, 
0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; -static uint32_t idM32[32] = {0x80000000, 0x40000000, 0x20000000, 0x10000000, 0x8000000, 0x4000000, 0x2000000, 0x1000000, 0x800000, 0x400000, 0x200000, 0x100000, 0x80000, 0x40000, 0x20000, 0x10000, 0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; -static uint64_t idM64[64] = {0x8000000000000000, 0x4000000000000000, 0x2000000000000000, 0x1000000000000000, 0x800000000000000, 0x400000000000000, 0x200000000000000, 0x100000000000000, 0x80000000000000, 0x40000000000000, 0x20000000000000, 0x10000000000000, 0x8000000000000, 0x4000000000000, 0x2000000000000, 0x1000000000000, 0x800000000000, 0x400000000000, 0x200000000000, 0x100000000000, 0x80000000000, 0x40000000000, 0x20000000000, 0x10000000000, 0x8000000000, 0x4000000000, 0x2000000000, 0x1000000000, 0x800000000, 0x400000000, 0x200000000, 0x100000000, - 0x80000000, 0x40000000, 0x20000000, 0x10000000, 0x8000000, 0x4000000, 0x2000000, 0x1000000, 0x800000, 0x400000, 0x200000, 0x100000, 0x80000, 0x40000, 0x20000, 0x10000, 0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; - -void initM4(M4 *Mat) // initial Matrix 4*4 +static uint16_t idM16[16] = {0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, + 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, + 0x2, 0x1}; +static uint32_t idM32[32] = {0x80000000, 0x40000000, 0x20000000, 0x10000000, + 0x8000000, 0x4000000, 0x2000000, 0x1000000, + 0x800000, 0x400000, 0x200000, 0x100000, 0x80000, + 0x40000, 0x20000, 0x10000, 0x8000, 0x4000, 0x2000, + 0x1000, 0x800, 0x400, 0x200, 0x100, 0x80, 0x40, + 0x20, 0x10, 0x8, 0x4, 0x2, 0x1}; +static uint64_t idM64[64] = {0x8000000000000000, 0x4000000000000000, + 0x2000000000000000, 0x1000000000000000, + 0x800000000000000, 0x400000000000000, + 0x200000000000000, 0x100000000000000, + 0x80000000000000, 0x40000000000000, + 0x20000000000000, 0x10000000000000, + 0x8000000000000, 0x4000000000000, 
0x2000000000000, + 0x1000000000000, 0x800000000000, 0x400000000000, + 0x200000000000, 0x100000000000, 0x80000000000, + 0x40000000000, 0x20000000000, 0x10000000000, + 0x8000000000, 0x4000000000, 0x2000000000, + 0x1000000000, 0x800000000, 0x400000000, + 0x200000000, 0x100000000, 0x80000000, 0x40000000, + 0x20000000, 0x10000000, 0x8000000, 0x4000000, + 0x2000000, 0x1000000, 0x800000, 0x400000, 0x200000, + 0x100000, 0x80000, 0x40000, 0x20000, 0x10000, + 0x8000, 0x4000, 0x2000, 0x1000, 0x800, 0x400, + 0x200, 0x100, 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, + 0x2, 0x1}; + +void initM4(M4 *Mat) /* initial Matrix 4*4 */ { int i; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { (*Mat).M[i] = 0; } } -void initM8(M8 *Mat) // initial Matrix 8*8 +void initM8(M8 *Mat) /* initial Matrix 8*8 */ { int i; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { (*Mat).M[i] = 0; } } -void initM16(M16 *Mat) // initial Matrix 16*16 +void initM16(M16 *Mat) /* initial Matrix 16*16 */ { int i; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { (*Mat).M[i] = 0; } } -void initM32(M32 *Mat) // initial Matrix 32*32 +void initM32(M32 *Mat) /* initial Matrix 32*32 */ { int i; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { (*Mat).M[i] = 0; } } -void initM64(M64 *Mat) // initial Matrix 64*64 +void initM64(M64 *Mat) /* initial Matrix 64*64 */ { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { (*Mat).M[i] = 0; } } -void initM128(M128 *Mat) // initial Matrix 128*128 +void initM128(M128 *Mat) /* initial Matrix 128*128 */ { int i; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { (*Mat).M[i][0] = 0; (*Mat).M[i][1] = 0; } } -void initM256(M256 *Mat) // initial Matrix 256*256 +void initM256(M256 *Mat) /* initial Matrix 256*256 */ { int i; - for (i = 0; i < 256; i++) - { + for (i = 0; i < 256; i++) { (*Mat).M[i][0] = 0; (*Mat).M[i][1] = 0; (*Mat).M[i][2] = 0; (*Mat).M[i][3] = 0; } } -void initV4(V4 *Vec) // initial Vector 4*1 +void initV4(V4 *Vec) /* 
initial Vector 4*1 */ { (*Vec).V = 0; } -void initV8(V8 *Vec) // initial Vector 8*1 +void initV8(V8 *Vec) /* initial Vector 8*1 */ { (*Vec).V = 0; } -void initV16(V16 *Vec) // initial Vector 16*1 +void initV16(V16 *Vec) /* initial Vector 16*1 */ { (*Vec).V = 0; } -void initV32(V32 *Vec) // initial Vector 32*1 +void initV32(V32 *Vec) /* initial Vector 32*1 */ { (*Vec).V = 0; } -void initV64(V64 *Vec) // initial Vector 64*1 +void initV64(V64 *Vec) /* initial Vector 64*1 */ { (*Vec).V = 0; } -void initV128(V128 *Vec) // initial Vector 128*1 +void initV128(V128 *Vec) /* initial Vector 128*1 */ { (*Vec).V[0] = 0; (*Vec).V[1] = 0; } -void initV256(V256 *Vec) // initial Vector 256*1 +void initV256(V256 *Vec) /* initial Vector 256*1 */ { (*Vec).V[0] = 0; (*Vec).V[1] = 0; (*Vec).V[2] = 0; (*Vec).V[3] = 0; } -void randM4(M4 *Mat) // randomize Matrix 4*4 +void randM4(M4 *Mat) /* randomize Matrix 4*4 */ { int i; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 4; i++) - { + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 4; i++) { (*Mat).M[i] = cus_random() & 0x0f; } } -void randM8(M8 *Mat) // randomize Matrix 8*8 +void randM8(M8 *Mat) /* randomize Matrix 8*8 */ { int i; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 8; i++) - { + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 8; i++) { (*Mat).M[i] = cus_random(); } } -void randM16(M16 *Mat) // randomize Matrix 16*16 +void randM16(M16 *Mat) /* randomize Matrix 16*16 */ { int i; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 16; i++) - { + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 16; i++) { (*Mat).M[i] = cus_random(); } } -void randM32(M32 *Mat) // randomize Matrix 32*32 +void randM32(M32 *Mat) /* randomize Matrix 32*32 */ { int i; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 32; i++) - { + /* 
InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 32; i++) { (*Mat).M[i] = cus_random(); } } -void randM64(M64 *Mat) // randomize Matrix 64*64 +void randM64(M64 *Mat) /* randomize Matrix 64*64 */ { int i; uint32_t *m; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 64; i++) - { + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 64; i++) { m = (uint32_t *)&((*Mat).M[i]); *(m + 1) = cus_random(); *m = cus_random(); } } -void randM128(M128 *Mat) // randomize Matrix 128*128 +void randM128(M128 *Mat) /* randomize Matrix 128*128 */ { int i; uint32_t *m; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 128; i++) - { + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 128; i++) { m = (uint32_t *)&((*Mat).M[i][0]); *(m + 1) = cus_random(); *m = cus_random(); @@ -204,13 +219,12 @@ void randM128(M128 *Mat) // randomize Matrix 128*128 *m = cus_random(); } } -void randM256(M256 *Mat) // randomize Matrix 256*256 +void randM256(M256 *Mat) /* randomize Matrix 256*256 */ { int i; uint32_t *m; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); - for (i = 0; i < 256; i++) - { + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ + for (i = 0; i < 256; i++) { m = (uint32_t *)&((*Mat).M[i][0]); *(m + 1) = cus_random(); *m = cus_random(); @@ -225,132 +239,121 @@ void randM256(M256 *Mat) // randomize Matrix 256*256 *m = cus_random(); } } -void identityM4(M4 *Mat) // identity matrix 4*4 +void identityM4(M4 *Mat) /* identity matrix 4*4 */ { int i; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { (*Mat).M[i] = idM4[i]; } } -void identityM8(M8 *Mat) // identity matrix 8*8 +void identityM8(M8 *Mat) /* identity matrix 8*8 */ { int i; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { (*Mat).M[i] = idM8[i]; } } -void identityM16(M16 *Mat) // identity matrix 16*16 +void identityM16(M16 *Mat) /* identity matrix 16*16 */ { 
int i; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { (*Mat).M[i] = idM16[i]; } } -void identityM32(M32 *Mat) // identity matrix 32*32 +void identityM32(M32 *Mat) /* identity matrix 32*32 */ { int i; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { (*Mat).M[i] = idM32[i]; } } -void identityM64(M64 *Mat) // identity matrix 64*64 +void identityM64(M64 *Mat) /* identity matrix 64*64 */ { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { (*Mat).M[i] = idM64[i]; } } -void identityM128(M128 *Mat) // identity matrix 128*128 +void identityM128(M128 *Mat) /* identity matrix 128*128 */ { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { (*Mat).M[i][0] = idM64[i]; (*Mat).M[i][1] = 0; } - for (i = 64; i < 128; i++) - { + for (i = 64; i < 128; i++) { (*Mat).M[i][0] = 0; (*Mat).M[i][1] = idM64[i - 64]; } } -void identityM256(M256 *Mat) // identity matrix 256*256 +void identityM256(M256 *Mat) /* identity matrix 256*256 */ { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { (*Mat).M[i][0] = idM64[i]; (*Mat).M[i][1] = 0; (*Mat).M[i][2] = 0; (*Mat).M[i][3] = 0; } - for (i = 64; i < 128; i++) - { + for (i = 64; i < 128; i++) { (*Mat).M[i][0] = 0; (*Mat).M[i][1] = idM64[i - 64]; (*Mat).M[i][2] = 0; (*Mat).M[i][3] = 0; } - for (i = 128; i < 192; i++) - { + for (i = 128; i < 192; i++) { (*Mat).M[i][0] = 0; (*Mat).M[i][1] = 0; (*Mat).M[i][2] = idM64[i - 128]; (*Mat).M[i][3] = 0; } - for (i = 192; i < 256; i++) - { + for (i = 192; i < 256; i++) { (*Mat).M[i][0] = 0; (*Mat).M[i][1] = 0; (*Mat).M[i][2] = 0; (*Mat).M[i][3] = idM64[i - 192]; } } -void randV4(V4 *Vec) // randomize Vector 4*1 +void randV4(V4 *Vec) /* randomize Vector 4*1 */ { - // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + /* InitRandom((randseed++) ^ (unsigned int)time(NULL)); */ (*Vec).V = cus_random() & 0x0f; } -void randV8(V8 *Vec) // randomize Vector 8*1 +void randV8(V8 *Vec) /* randomize Vector 8*1 */ { - // InitRandom((randseed++) ^ 
(unsigned int)time(NULL)); + /* InitRandom((randseed++) ^ (unsigned int)time(NULL)); */ (*Vec).V = cus_random(); } -void randV16(V16 *Vec) // randomize Vector 16*1 +void randV16(V16 *Vec) /* randomize Vector 16*1 */ { - // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + /* InitRandom((randseed++) ^ (unsigned int)time(NULL)); */ (*Vec).V = cus_random(); } -void randV32(V32 *Vec) // randomize Vector 32*1 +void randV32(V32 *Vec) /* randomize Vector 32*1 */ { - // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + /* InitRandom((randseed++) ^ (unsigned int)time(NULL)); */ (*Vec).V = cus_random(); } -void randV64(V64 *Vec) // randomize Vector 64*1 +void randV64(V64 *Vec) /* randomize Vector 64*1 */ { uint32_t *v = (uint32_t *)&((*Vec).V); *(v + 1) = cus_random(); *v = cus_random(); } -void randV128(V128 *Vec) // randomize Vector 128*1 +void randV128(V128 *Vec) /* randomize Vector 128*1 */ { uint32_t *v = (uint32_t *)&((*Vec).V[0]); - // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + /* InitRandom((randseed++) ^ (unsigned int)time(NULL)); */ *(v + 1) = cus_random(); *v = cus_random(); v = (uint32_t *)&((*Vec).V[1]); *(v + 1) = cus_random(); *v = cus_random(); } -void randV256(V256 *Vec) // randomize Vector 256*1 +void randV256(V256 *Vec) /* randomize Vector 256*1 */ { uint32_t *v = (uint32_t *)&((*Vec).V[0]); - // InitRandom((randseed++) ^ (unsigned int)time(NULL)); + /* InitRandom((randseed++) ^ (unsigned int)time(NULL)); */ *(v + 1) = cus_random(); *v = cus_random(); v = (uint32_t *)&((*Vec).V[1]); @@ -363,92 +366,85 @@ void randV256(V256 *Vec) // randomize Vector 256*1 *(v + 1) = cus_random(); *v = cus_random(); } -void printM4(M4 Mat) // printf Matrix 4*4 +void printM4(M4 Mat) /* printf Matrix 4*4 */ { int i; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { printf("0x%x\n", Mat.M[i]); } } -void printM8(M8 Mat) // printf Matrix 8*8 +void printM8(M8 Mat) /* printf Matrix 8*8 */ { int i; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) 
{ printf("0x%x\n", Mat.M[i]); } } -void printM16(M16 Mat) // printf Matrix 16*16 +void printM16(M16 Mat) /* printf Matrix 16*16 */ { int i; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { printf("0x%x\n", Mat.M[i]); } } -void printM32(M32 Mat) // printf Matrix 32*32 +void printM32(M32 Mat) /* printf Matrix 32*32 */ { int i; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { printf("0x%x\n", Mat.M[i]); } } -void printM64(M64 Mat) // printf Matrix 64*64 +void printM64(M64 Mat) /* printf Matrix 64*64 */ { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { printf("0x%" PRIx64 "\n", Mat.M[i]); } } -void printM128(M128 Mat) // printf Matrix 128*128 +void printM128(M128 Mat) /* printf Matrix 128*128 */ { int i; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { printf("0x%" PRIx64 " ", Mat.M[i][0]); printf("0x%" PRIx64 "\n", Mat.M[i][1]); } } -void printM256(M256 Mat) // printf Matrix 256*256 +void printM256(M256 Mat) /* printf Matrix 256*256 */ { int i; - for (i = 0; i < 256; i++) - { + for (i = 0; i < 256; i++) { printf("0x%" PRIx64 " ", Mat.M[i][0]); printf("0x%" PRIx64 " ", Mat.M[i][1]); printf("0x%" PRIx64 " ", Mat.M[i][2]); printf("0x%" PRIx64 "\n", Mat.M[i][3]); } } -void printV4(V4 Vec) // printf Vector 4*1 +void printV4(V4 Vec) /* printf Vector 4*1 */ { printf("0x%x\n", Vec.V); } -void printV8(V8 Vec) // printf Vector 8*1 +void printV8(V8 Vec) /* printf Vector 8*1 */ { printf("0x%x\n", Vec.V); } -void printV16(V16 Vec) // printf Vector 16*1 +void printV16(V16 Vec) /* printf Vector 16*1 */ { printf("0x%x\n", Vec.V); } -void printV32(V32 Vec) // printf Vector 32*1 +void printV32(V32 Vec) /* printf Vector 32*1 */ { printf("0x%x\n", Vec.V); } -void printV64(V64 Vec) // printf Vector 64*1 +void printV64(V64 Vec) /* printf Vector 64*1 */ { printf("0x%" PRIx64 "\n", Vec.V); } -void printV128(V128 Vec) // printf Vector 128*1 +void printV128(V128 Vec) /* printf Vector 128*1 */ { printf("0x%" PRIx64 " ", Vec.V[0]); 
printf("0x%" PRIx64 "\n", Vec.V[1]); } -void printV256(V256 Vec) // printf Vector 256*1 +void printV256(V256 Vec) /* printf Vector 256*1 */ { printf("0x%" PRIx64 " ", Vec.V[0]); printf("0x%" PRIx64 " ", Vec.V[1]); @@ -458,48 +454,42 @@ void printV256(V256 Vec) // printf Vector 256*1 void copyM4(M4 Mat1, M4 *Mat2) { int i; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { (*Mat2).M[i] = Mat1.M[i]; } } void copyM8(M8 Mat1, M8 *Mat2) { int i; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { (*Mat2).M[i] = Mat1.M[i]; } } void copyM16(M16 Mat1, M16 *Mat2) { int i; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { (*Mat2).M[i] = Mat1.M[i]; } } void copyM32(M32 Mat1, M32 *Mat2) { int i; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { (*Mat2).M[i] = Mat1.M[i]; } } void copyM64(M64 Mat1, M64 *Mat2) { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { (*Mat2).M[i] = Mat1.M[i]; } } void copyM128(M128 Mat1, M128 *Mat2) { int i; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { (*Mat2).M[i][0] = Mat1.M[i][0]; (*Mat2).M[i][1] = Mat1.M[i][1]; } @@ -507,8 +497,7 @@ void copyM128(M128 Mat1, M128 *Mat2) void copyM256(M256 Mat1, M256 *Mat2) { int i; - for (i = 0; i < 256; i++) - { + for (i = 0; i < 256; i++) { (*Mat2).M[i][0] = Mat1.M[i][0]; (*Mat2).M[i][1] = Mat1.M[i][1]; (*Mat2).M[i][2] = Mat1.M[i][2]; @@ -518,8 +507,7 @@ void copyM256(M256 Mat1, M256 *Mat2) int isequalM4(M4 Mat1, M4 Mat2) { int i; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { if (Mat1.M[i] != Mat2.M[i]) return 0; } @@ -528,8 +516,7 @@ int isequalM4(M4 Mat1, M4 Mat2) int isequalM8(M8 Mat1, M8 Mat2) { int i; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { if (Mat1.M[i] != Mat2.M[i]) return 0; } @@ -538,8 +525,7 @@ int isequalM8(M8 Mat1, M8 Mat2) int isequalM16(M16 Mat1, M16 Mat2) { int i; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { if (Mat1.M[i] != Mat2.M[i]) return 0; } @@ -548,8 +534,7 @@ int isequalM16(M16 
Mat1, M16 Mat2) int isequalM32(M32 Mat1, M32 Mat2) { int i; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { if (Mat1.M[i] != Mat2.M[i]) return 0; } @@ -558,8 +543,7 @@ int isequalM32(M32 Mat1, M32 Mat2) int isequalM64(M64 Mat1, M64 Mat2) { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { if (Mat1.M[i] != Mat2.M[i]) return 0; } @@ -568,8 +552,7 @@ int isequalM64(M64 Mat1, M64 Mat2) int isequalM128(M128 Mat1, M128 Mat2) { int i; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { if (Mat1.M[i][0] != Mat2.M[i][0]) return 0; if (Mat1.M[i][1] != Mat2.M[i][1]) @@ -580,8 +563,7 @@ int isequalM128(M128 Mat1, M128 Mat2) int isequalM256(M256 Mat1, M256 Mat2) { int i; - for (i = 0; i < 256; i++) - { + for (i = 0; i < 256; i++) { if (Mat1.M[i][0] != Mat2.M[i][0]) return 0; if (Mat1.M[i][1] != Mat2.M[i][1]) @@ -643,212 +625,198 @@ int isequalV256(V256 Vec1, V256 Vec2) return 0; return 1; } -int readbitM4(M4 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-3 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-3 */ +int readbitM4(M4 Mat, int i, int j) { if ((Mat.M[i] & idM4[j]) == idM4[j]) return 1; else return 0; } -int readbitM8(M8 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-7 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-7 */ +int readbitM8(M8 Mat, int i, int j) { if ((Mat.M[i] & idM8[j]) == idM8[j]) return 1; else return 0; } -int readbitM16(M16 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-15 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-15 */ +int readbitM16(M16 Mat, int i, int j) { if ((Mat.M[i] & idM16[j]) == idM16[j]) return 1; else return 0; } -int readbitM32(M32 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-31 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-31 */ +int readbitM32(M32 Mat, int i, int j) { if 
((Mat.M[i] & idM32[j]) == idM32[j]) return 1; else return 0; } -int readbitM64(M64 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-63 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-63 */ +int readbitM64(M64 Mat, int i, int j) { if ((Mat.M[i] & idM64[j]) == idM64[j]) return 1; else return 0; } -int readbitM128(M128 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-127 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-127 */ +int readbitM128(M128 Mat, int i, int j) { - if (j < 64) - { + if (j < 64) { if ((Mat.M[i][0] & idM64[j]) == idM64[j]) return 1; else return 0; - } - else - { + } else { if ((Mat.M[i][1] & idM64[j - 64]) == idM64[j - 64]) return 1; else return 0; } } -int readbitM256(M256 Mat, int i, int j) // read one bit in a matrix, i in n rows, j in n columns, i,j: 0-255 +/* read one bit in a matrix, i in n rows, j in n columns, i,j: 0-255 */ +int readbitM256(M256 Mat, int i, int j) { - if (j < 64) - { + if (j < 64) { if ((Mat.M[i][0] & idM64[j]) == idM64[j]) return 1; else return 0; - } - else if (j < 128) - { + } else if (j < 128) { if ((Mat.M[i][1] & idM64[j - 64]) == idM64[j - 64]) return 1; else return 0; - } - else if (j < 192) - { + } else if (j < 192) { if ((Mat.M[i][2] & idM64[j - 128]) == idM64[j - 128]) return 1; else return 0; - } - else - { + } else { if ((Mat.M[i][3] & idM64[j - 192]) == idM64[j - 192]) return 1; else return 0; } } -void flipbitM4(M4 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM4(M4 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { (*Mat).M[i] ^= idM4[j]; } -void flipbitM8(M8 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM8(M8 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { (*Mat).M[i] ^= idM8[j]; } -void flipbitM16(M16 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM16(M16 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { (*Mat).M[i] ^= idM16[j]; } 
-void flipbitM32(M32 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM32(M32 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { (*Mat).M[i] ^= idM32[j]; } -void flipbitM64(M64 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM64(M64 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { (*Mat).M[i] ^= idM64[j]; } -void flipbitM128(M128 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM128(M128 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { - if (j < 64) - { + if (j < 64) { (*Mat).M[i][0] ^= idM64[j]; - } - else - { + } else { (*Mat).M[i][1] ^= idM64[j - 64]; } } -void flipbitM256(M256 *Mat, int i, int j) // flip (i, j) bit in a matrix +void flipbitM256(M256 *Mat, int i, int j) /* flip (i, j) bit in a matrix */ { - if (j < 64) - { + if (j < 64) { (*Mat).M[i][0] ^= idM64[j]; - } - else if (j < 128) - { + } else if (j < 128) { (*Mat).M[i][1] ^= idM64[j - 64]; - } - else if (j < 192) - { + } else if (j < 192) { (*Mat).M[i][2] ^= idM64[j - 128]; - } - else - { + } else { (*Mat).M[i][3] ^= idM64[j - 192]; } } -void setbitM4(M4 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM4(M4 *Mat, int i, int j, int bit) { if (readbitM4(*Mat, i, j) == bit) return; else flipbitM4(Mat, i, j); } -void setbitM8(M8 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM8(M8 *Mat, int i, int j, int bit) { if (readbitM8(*Mat, i, j) == bit) return; else flipbitM8(Mat, i, j); } -void setbitM16(M16 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM16(M16 *Mat, int i, int j, int bit) { if (readbitM16(*Mat, i, j) == bit) return; else flipbitM16(Mat, i, j); } -void setbitM32(M32 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM32(M32 
*Mat, int i, int j, int bit) { if (readbitM32(*Mat, i, j) == bit) return; else flipbitM32(Mat, i, j); } -void setbitM64(M64 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM64(M64 *Mat, int i, int j, int bit) { if (readbitM64(*Mat, i, j) == bit) return; else flipbitM64(Mat, i, j); } -void setbitM128(M128 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM128(M128 *Mat, int i, int j, int bit) { if (readbitM128(*Mat, i, j) == bit) return; else flipbitM128(Mat, i, j); } -void setbitM256(M256 *Mat, int i, int j, int bit) // set (i, j) bit in a matrix, bit = 0/1 +/* set (i, j) bit in a matrix, bit = 0/1 */ +void setbitM256(M256 *Mat, int i, int j, int bit) { if (readbitM256(*Mat, i, j) == bit) return; else flipbitM256(Mat, i, j); } -int isinvertM4(M4 Mat) // Invertible Matrix? +int isinvertM4(M4 Mat) /* Invertible Matrix? */ { int i, j, k; uint8_t temp; int flag; - for (i = 0; i < 4; i++) - { - if ((Mat.M[i] & idM4[i]) == idM4[i]) - { - for (j = i + 1; j < 4; j++) - { - if ((Mat.M[j] & idM4[i]) == idM4[i]) - { + for (i = 0; i < 4; i++) { + if ((Mat.M[i] & idM4[i]) == idM4[i]) { + for (j = i + 1; j < 4; j++) { + if ((Mat.M[j] & idM4[i]) == idM4[i]) { Mat.M[j] ^= Mat.M[i]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 4; j++) - { - if ((Mat.M[j] & idM4[i]) == idM4[i]) - { + for (j = i + 1; j < 4; j++) { + if ((Mat.M[j] & idM4[i]) == idM4[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -858,10 +826,8 @@ int isinvertM4(M4 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 4; k++) - { - if ((Mat.M[k] & idM4[i]) == idM4[i]) - { + for (k = i + 1; k < 4; k++) { + if ((Mat.M[k] & idM4[i]) == idM4[i]) { Mat.M[k] ^= Mat.M[i]; } } @@ -872,30 +838,22 @@ int isinvertM4(M4 Mat) // Invertible Matrix? else return 0; } -int isinvertM8(M8 Mat) // Invertible Matrix? 
+int isinvertM8(M8 Mat) /* Invertible Matrix? */ { int i, j, k; uint8_t temp; int flag; - for (i = 0; i < 8; i++) - { - if ((Mat.M[i] & idM8[i]) == idM8[i]) - { - for (j = i + 1; j < 8; j++) - { - if ((Mat.M[j] & idM8[i]) == idM8[i]) - { + for (i = 0; i < 8; i++) { + if ((Mat.M[i] & idM8[i]) == idM8[i]) { + for (j = i + 1; j < 8; j++) { + if ((Mat.M[j] & idM8[i]) == idM8[i]) { Mat.M[j] ^= Mat.M[i]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 8; j++) - { - if ((Mat.M[j] & idM8[i]) == idM8[i]) - { + for (j = i + 1; j < 8; j++) { + if ((Mat.M[j] & idM8[i]) == idM8[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -905,10 +863,8 @@ int isinvertM8(M8 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 8; k++) - { - if ((Mat.M[k] & idM8[i]) == idM8[i]) - { + for (k = i + 1; k < 8; k++) { + if ((Mat.M[k] & idM8[i]) == idM8[i]) { Mat.M[k] ^= Mat.M[i]; } } @@ -919,30 +875,22 @@ int isinvertM8(M8 Mat) // Invertible Matrix? else return 0; } -int isinvertM16(M16 Mat) // Invertible Matrix? +int isinvertM16(M16 Mat) /* Invertible Matrix? */ { int i, j, k; uint16_t temp; int flag; - for (i = 0; i < 16; i++) - { - if ((Mat.M[i] & idM16[i]) == idM16[i]) - { - for (j = i + 1; j < 16; j++) - { - if ((Mat.M[j] & idM16[i]) == idM16[i]) - { + for (i = 0; i < 16; i++) { + if ((Mat.M[i] & idM16[i]) == idM16[i]) { + for (j = i + 1; j < 16; j++) { + if ((Mat.M[j] & idM16[i]) == idM16[i]) { Mat.M[j] ^= Mat.M[i]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 16; j++) - { - if ((Mat.M[j] & idM16[i]) == idM16[i]) - { + for (j = i + 1; j < 16; j++) { + if ((Mat.M[j] & idM16[i]) == idM16[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -952,10 +900,8 @@ int isinvertM16(M16 Mat) // Invertible Matrix? 
} if (flag) return 0; - for (k = i + 1; k < 16; k++) - { - if ((Mat.M[k] & idM16[i]) == idM16[i]) - { + for (k = i + 1; k < 16; k++) { + if ((Mat.M[k] & idM16[i]) == idM16[i]) { Mat.M[k] ^= Mat.M[i]; } } @@ -966,30 +912,22 @@ int isinvertM16(M16 Mat) // Invertible Matrix? else return 0; } -int isinvertM32(M32 Mat) // Invertible Matrix? +int isinvertM32(M32 Mat) /* Invertible Matrix? */ { int i, j, k; uint32_t temp; int flag; - for (i = 0; i < 32; i++) - { - if ((Mat.M[i] & idM32[i]) == idM32[i]) - { - for (j = i + 1; j < 32; j++) - { - if ((Mat.M[j] & idM32[i]) == idM32[i]) - { + for (i = 0; i < 32; i++) { + if ((Mat.M[i] & idM32[i]) == idM32[i]) { + for (j = i + 1; j < 32; j++) { + if ((Mat.M[j] & idM32[i]) == idM32[i]) { Mat.M[j] ^= Mat.M[i]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 32; j++) - { - if ((Mat.M[j] & idM32[i]) == idM32[i]) - { + for (j = i + 1; j < 32; j++) { + if ((Mat.M[j] & idM32[i]) == idM32[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -999,10 +937,8 @@ int isinvertM32(M32 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 32; k++) - { - if ((Mat.M[k] & idM32[i]) == idM32[i]) - { + for (k = i + 1; k < 32; k++) { + if ((Mat.M[k] & idM32[i]) == idM32[i]) { Mat.M[k] ^= Mat.M[i]; } } @@ -1013,30 +949,22 @@ int isinvertM32(M32 Mat) // Invertible Matrix? else return 0; } -int isinvertM64(M64 Mat) // Invertible Matrix? +int isinvertM64(M64 Mat) /* Invertible Matrix? 
*/ { int i, j, k; uint64_t temp; int flag; - for (i = 0; i < 64; i++) - { - if ((Mat.M[i] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 64; j++) - { - if ((Mat.M[j] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { + if ((Mat.M[i] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 64; j++) { + if ((Mat.M[j] & idM64[i]) == idM64[i]) { Mat.M[j] ^= Mat.M[i]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 64; j++) - { - if ((Mat.M[j] & idM64[i]) == idM64[i]) - { + for (j = i + 1; j < 64; j++) { + if ((Mat.M[j] & idM64[i]) == idM64[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -1046,10 +974,8 @@ int isinvertM64(M64 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 64; k++) - { - if ((Mat.M[k] & idM64[i]) == idM64[i]) - { + for (k = i + 1; k < 64; k++) { + if ((Mat.M[k] & idM64[i]) == idM64[i]) { Mat.M[k] ^= Mat.M[i]; } } @@ -1060,31 +986,23 @@ int isinvertM64(M64 Mat) // Invertible Matrix? else return 0; } -int isinvertM128(M128 Mat) // Invertible Matrix? +int isinvertM128(M128 Mat) /* Invertible Matrix? */ { int i, j, k; uint64_t temp; int flag; - for (i = 0; i < 64; i++) - { - if ((Mat.M[i][0] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { Mat.M[j][0] ^= Mat.M[i][0]; Mat.M[j][1] ^= Mat.M[i][1]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { temp = Mat.M[i][0]; Mat.M[i][0] = Mat.M[j][0]; Mat.M[j][0] = temp; @@ -1098,35 +1016,25 @@ int isinvertM128(M128 Mat) // Invertible Matrix? 
} if (flag) return 0; - for (k = i + 1; k < 128; k++) - { - if ((Mat.M[k][0] & idM64[i]) == idM64[i]) - { + for (k = i + 1; k < 128; k++) { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) { Mat.M[k][0] ^= Mat.M[i][0]; Mat.M[k][1] ^= Mat.M[i][1]; } } } } - for (i = 64; i < 128; i++) - { - if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) - { - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 64; i < 128; i++) { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[j][1] ^= Mat.M[i][1]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { temp = Mat.M[i][1]; Mat.M[i][1] = Mat.M[j][1]; Mat.M[j][1] = temp; @@ -1136,10 +1044,8 @@ int isinvertM128(M128 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 128; k++) - { - if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (k = i + 1; k < 128; k++) { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[k][1] ^= Mat.M[i][1]; } } @@ -1150,33 +1056,25 @@ int isinvertM128(M128 Mat) // Invertible Matrix? else return 0; } -int isinvertM256(M256 Mat) // Invertible Matrix? +int isinvertM256(M256 Mat) /* Invertible Matrix? 
*/ { int i, j, k; uint64_t temp; int flag; - for (i = 0; i < 64; i++) - { - if ((Mat.M[i][0] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { Mat.M[j][0] ^= Mat.M[i][0]; Mat.M[j][1] ^= Mat.M[i][1]; Mat.M[j][2] ^= Mat.M[i][2]; Mat.M[j][3] ^= Mat.M[i][3]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { temp = Mat.M[i][0]; Mat.M[i][0] = Mat.M[j][0]; Mat.M[j][0] = temp; @@ -1199,10 +1097,8 @@ int isinvertM256(M256 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][0] & idM64[i]) == idM64[i]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) { Mat.M[k][0] ^= Mat.M[i][0]; Mat.M[k][1] ^= Mat.M[i][1]; Mat.M[k][2] ^= Mat.M[i][2]; @@ -1211,27 +1107,19 @@ int isinvertM256(M256 Mat) // Invertible Matrix? } } } - for (i = 64; i < 128; i++) - { - if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 64; i < 128; i++) { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[j][1] ^= Mat.M[i][1]; Mat.M[j][2] ^= Mat.M[i][2]; Mat.M[j][3] ^= Mat.M[i][3]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { temp = Mat.M[i][1]; Mat.M[i][1] = Mat.M[j][1]; Mat.M[j][1] = temp; @@ -1250,10 +1138,8 @@ int isinvertM256(M256 Mat) // Invertible Matrix? 
} if (flag) return 0; - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[k][1] ^= Mat.M[i][1]; Mat.M[k][2] ^= Mat.M[i][2]; Mat.M[k][3] ^= Mat.M[i][3]; @@ -1261,26 +1147,18 @@ int isinvertM256(M256 Mat) // Invertible Matrix? } } } - for (i = 128; i < 192; i++) - { - if ((Mat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (i = 128; i < 192; i++) { + if ((Mat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { Mat.M[j][2] ^= Mat.M[i][2]; Mat.M[j][3] ^= Mat.M[i][3]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { temp = Mat.M[i][2]; Mat.M[i][2] = Mat.M[j][2]; Mat.M[j][2] = temp; @@ -1295,35 +1173,25 @@ int isinvertM256(M256 Mat) // Invertible Matrix? 
} if (flag) return 0; - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) { Mat.M[k][2] ^= Mat.M[i][2]; Mat.M[k][3] ^= Mat.M[i][3]; } } } } - for (i = 192; i < 256; i++) - { - if ((Mat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (i = 192; i < 256; i++) { + if ((Mat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { Mat.M[j][3] ^= Mat.M[i][3]; } } - } - else - { + } else { flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { temp = Mat.M[i][3]; Mat.M[i][3] = Mat.M[j][3]; Mat.M[j][3] = temp; @@ -1334,10 +1202,8 @@ int isinvertM256(M256 Mat) // Invertible Matrix? } if (flag) return 0; - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) { Mat.M[k][3] ^= Mat.M[i][3]; } } @@ -1348,30 +1214,22 @@ int isinvertM256(M256 Mat) // Invertible Matrix? 
else return 0; } -void invsM4(M4 Mat, M4 *Mat_inv) // compute the 4*4 inverse matrix +void invsM4(M4 Mat, M4 *Mat_inv) /* compute the 4*4 inverse matrix */ { int i, j, k; uint8_t temp; identityM4(Mat_inv); - for (i = 0; i < 4; i++) - { - if ((Mat.M[i] & idM4[i]) == idM4[i]) - { - for (j = i + 1; j < 4; j++) - { - if ((Mat.M[j] & idM4[i]) == idM4[i]) - { + for (i = 0; i < 4; i++) { + if ((Mat.M[i] & idM4[i]) == idM4[i]) { + for (j = i + 1; j < 4; j++) { + if ((Mat.M[j] & idM4[i]) == idM4[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } - } - else - { - for (j = i + 1; j < 4; j++) - { - if ((Mat.M[j] & idM4[i]) == idM4[i]) - { + } else { + for (j = i + 1; j < 4; j++) { + if ((Mat.M[j] & idM4[i]) == idM4[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -1382,52 +1240,39 @@ void invsM4(M4 Mat, M4 *Mat_inv) // compute the 4*4 inverse matrix break; } } - for (k = i + 1; k < 4; k++) - { - if ((Mat.M[k] & idM4[i]) == idM4[i]) - { + for (k = i + 1; k < 4; k++) { + if ((Mat.M[k] & idM4[i]) == idM4[i]) { Mat.M[k] ^= Mat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; } } } } - for (i = 3; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j] & idM4[i]) == idM4[i]) - { + for (i = 3; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j] & idM4[i]) == idM4[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } } } -void invsM8(M8 Mat, M8 *Mat_inv) // compute the 8*8 inverse matrix +void invsM8(M8 Mat, M8 *Mat_inv) /* compute the 8*8 inverse matrix */ { int i, j, k; uint8_t temp; identityM8(Mat_inv); - for (i = 0; i < 8; i++) - { - if ((Mat.M[i] & idM8[i]) == idM8[i]) - { - for (j = i + 1; j < 8; j++) - { - if ((Mat.M[j] & idM8[i]) == idM8[i]) - { + for (i = 0; i < 8; i++) { + if ((Mat.M[i] & idM8[i]) == idM8[i]) { + for (j = i + 1; j < 8; j++) { + if ((Mat.M[j] & idM8[i]) == idM8[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } - } - else - { - for (j = i + 1; j < 8; j++) - { - if ((Mat.M[j] & 
idM8[i]) == idM8[i]) - { + } else { + for (j = i + 1; j < 8; j++) { + if ((Mat.M[j] & idM8[i]) == idM8[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -1438,52 +1283,39 @@ void invsM8(M8 Mat, M8 *Mat_inv) // compute the 8*8 inverse matrix break; } } - for (k = i + 1; k < 8; k++) - { - if ((Mat.M[k] & idM8[i]) == idM8[i]) - { + for (k = i + 1; k < 8; k++) { + if ((Mat.M[k] & idM8[i]) == idM8[i]) { Mat.M[k] ^= Mat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; } } } } - for (i = 7; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j] & idM8[i]) == idM8[i]) - { + for (i = 7; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j] & idM8[i]) == idM8[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } } } -void invsM16(M16 Mat, M16 *Mat_inv) // compute the 16*16 inverse matrix +void invsM16(M16 Mat, M16 *Mat_inv) /* compute the 16*16 inverse matrix */ { int i, j, k; uint16_t temp; identityM16(Mat_inv); - for (i = 0; i < 16; i++) - { - if ((Mat.M[i] & idM16[i]) == idM16[i]) - { - for (j = i + 1; j < 16; j++) - { - if ((Mat.M[j] & idM16[i]) == idM16[i]) - { + for (i = 0; i < 16; i++) { + if ((Mat.M[i] & idM16[i]) == idM16[i]) { + for (j = i + 1; j < 16; j++) { + if ((Mat.M[j] & idM16[i]) == idM16[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } - } - else - { - for (j = i + 1; j < 16; j++) - { - if ((Mat.M[j] & idM16[i]) == idM16[i]) - { + } else { + for (j = i + 1; j < 16; j++) { + if ((Mat.M[j] & idM16[i]) == idM16[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -1494,52 +1326,39 @@ void invsM16(M16 Mat, M16 *Mat_inv) // compute the 16*16 inverse matrix break; } } - for (k = i + 1; k < 16; k++) - { - if ((Mat.M[k] & idM16[i]) == idM16[i]) - { + for (k = i + 1; k < 16; k++) { + if ((Mat.M[k] & idM16[i]) == idM16[i]) { Mat.M[k] ^= Mat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; } } } } - for (i = 15; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j] & idM16[i]) == 
idM16[i]) - { + for (i = 15; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j] & idM16[i]) == idM16[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } } } -void invsM32(M32 Mat, M32 *Mat_inv) // compute the 32*32 inverse matrix +void invsM32(M32 Mat, M32 *Mat_inv) /* compute the 32*32 inverse matrix */ { int i, j, k; uint32_t temp; identityM32(Mat_inv); - for (i = 0; i < 32; i++) - { - if ((Mat.M[i] & idM32[i]) == idM32[i]) - { - for (j = i + 1; j < 32; j++) - { - if ((Mat.M[j] & idM32[i]) == idM32[i]) - { + for (i = 0; i < 32; i++) { + if ((Mat.M[i] & idM32[i]) == idM32[i]) { + for (j = i + 1; j < 32; j++) { + if ((Mat.M[j] & idM32[i]) == idM32[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } - } - else - { - for (j = i + 1; j < 32; j++) - { - if ((Mat.M[j] & idM32[i]) == idM32[i]) - { + } else { + for (j = i + 1; j < 32; j++) { + if ((Mat.M[j] & idM32[i]) == idM32[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -1550,52 +1369,39 @@ void invsM32(M32 Mat, M32 *Mat_inv) // compute the 32*32 inverse matrix break; } } - for (k = i + 1; k < 32; k++) - { - if ((Mat.M[k] & idM32[i]) == idM32[i]) - { + for (k = i + 1; k < 32; k++) { + if ((Mat.M[k] & idM32[i]) == idM32[i]) { Mat.M[k] ^= Mat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; } } } } - for (i = 31; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j] & idM32[i]) == idM32[i]) - { + for (i = 31; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j] & idM32[i]) == idM32[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } } } -void invsM64(M64 Mat, M64 *Mat_inv) // compute the 64*64 inverse matrix +void invsM64(M64 Mat, M64 *Mat_inv) /* compute the 64*64 inverse matrix */ { int i, j, k; uint64_t temp; identityM64(Mat_inv); - for (i = 0; i < 64; i++) - { - if ((Mat.M[i] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 64; j++) - { - if ((Mat.M[j] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { + if 
((Mat.M[i] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 64; j++) { + if ((Mat.M[j] & idM64[i]) == idM64[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } - } - else - { - for (j = i + 1; j < 64; j++) - { - if ((Mat.M[j] & idM64[i]) == idM64[i]) - { + } else { + for (j = i + 1; j < 64; j++) { + if ((Mat.M[j] & idM64[i]) == idM64[i]) { temp = Mat.M[i]; Mat.M[i] = Mat.M[j]; Mat.M[j] = temp; @@ -1606,41 +1412,32 @@ void invsM64(M64 Mat, M64 *Mat_inv) // compute the 64*64 inverse matrix break; } } - for (k = i + 1; k < 64; k++) - { - if ((Mat.M[k] & idM64[i]) == idM64[i]) - { + for (k = i + 1; k < 64; k++) { + if ((Mat.M[k] & idM64[i]) == idM64[i]) { Mat.M[k] ^= Mat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; } } } } - for (i = 63; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j] & idM64[i]) == idM64[i]) - { + for (i = 63; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j] & idM64[i]) == idM64[i]) { Mat.M[j] ^= Mat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; } } } } -void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix +void invsM128(M128 Mat, M128 *Mat_inv) /* compute the 128*128 inverse matrix */ { int i, j, k; uint64_t temp; identityM128(Mat_inv); - for (i = 0; i < 64; i++) - { - if ((Mat.M[i][0] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { Mat.M[j][0] ^= Mat.M[i][0]; Mat.M[j][1] ^= Mat.M[i][1]; @@ -1648,13 +1445,9 @@ void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; } } - } - else - { - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + } else { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { temp = Mat.M[i][0]; Mat.M[i][0] = Mat.M[j][0]; 
Mat.M[j][0] = temp; @@ -1673,10 +1466,8 @@ void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix break; } } - for (k = i + 1; k < 128; k++) - { - if ((Mat.M[k][0] & idM64[i]) == idM64[i]) - { + for (k = i + 1; k < 128; k++) { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) { Mat.M[k][0] ^= Mat.M[i][0]; Mat.M[k][1] ^= Mat.M[i][1]; @@ -1686,27 +1477,19 @@ void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix } } } - for (i = 64; i < 128; i++) - { - if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) - { - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 64; i < 128; i++) { + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[j][1] ^= Mat.M[i][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; } } - } - else - { - for (j = i + 1; j < 128; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + } else { + for (j = i + 1; j < 128; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { temp = Mat.M[i][1]; Mat.M[i][1] = Mat.M[j][1]; Mat.M[j][1] = temp; @@ -1721,10 +1504,8 @@ void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix break; } } - for (k = i + 1; k < 128; k++) - { - if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (k = i + 1; k < 128; k++) { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[k][1] ^= Mat.M[i][1]; (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; @@ -1733,24 +1514,18 @@ void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix } } } - for (i = 127; i >= 64; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 127; i >= 64; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[j][1] ^= Mat.M[i][1]; (*Mat_inv).M[j][0] ^= 
(*Mat_inv).M[i][0]; (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; } } } - for (i = 63; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 63; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { Mat.M[j][0] ^= Mat.M[i][0]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; (*Mat_inv).M[j][1] ^= (*Mat_inv).M[i][1]; @@ -1758,19 +1533,15 @@ void invsM128(M128 Mat, M128 *Mat_inv) // compute the 128*128 inverse matrix } } } -void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix +void invsM256(M256 Mat, M256 *Mat_inv) /* compute the 256*256 inverse matrix */ { int i, j, k; uint64_t temp; identityM256(Mat_inv); - for (i = 0; i < 64; i++) // diagonal = 1? - { - if ((Mat.M[i][0] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { /* diagonal = 1? */ + if ((Mat.M[i][0] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { Mat.M[j][0] ^= Mat.M[i][0]; Mat.M[j][1] ^= Mat.M[i][1]; Mat.M[j][2] ^= Mat.M[i][2]; @@ -1782,13 +1553,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; } } - } - else // swap to find 1 - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + } else { /* swap to find 1 */ + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { temp = Mat.M[i][0]; Mat.M[i][0] = Mat.M[j][0]; Mat.M[j][0] = temp; @@ -1823,10 +1590,8 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix break; } } - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][0] & idM64[i]) == idM64[i]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][0] & idM64[i]) == idM64[i]) { Mat.M[k][0] ^= Mat.M[i][0]; Mat.M[k][1] ^= Mat.M[i][1]; Mat.M[k][2] ^= Mat.M[i][2]; @@ -1840,14 +1605,10 @@ void 
invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } - for (i = 64; i < 128; i++) // diagonal = 1? - { - if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 64; i < 128; i++) { /* diagonal = 1? */ + if ((Mat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[j][1] ^= Mat.M[i][1]; Mat.M[j][2] ^= Mat.M[i][2]; Mat.M[j][3] ^= Mat.M[i][3]; @@ -1858,13 +1619,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; } } - } - else // swap to find 1 - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + } else { /* swap to find 1 */ + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { temp = Mat.M[i][1]; Mat.M[i][1] = Mat.M[j][1]; Mat.M[j][1] = temp; @@ -1895,10 +1652,8 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix break; } } - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[k][1] ^= Mat.M[i][1]; Mat.M[k][2] ^= Mat.M[i][2]; Mat.M[k][3] ^= Mat.M[i][3]; @@ -1911,14 +1666,10 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } - for (i = 128; i < 192; i++) // diagonal = 1? - { - if ((Mat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (i = 128; i < 192; i++) { /* diagonal = 1? 
*/ + if ((Mat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { Mat.M[j][2] ^= Mat.M[i][2]; Mat.M[j][3] ^= Mat.M[i][3]; @@ -1928,13 +1679,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; } } - } - else // swap to find 1 - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + } else { /* swap to find 1 */ + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { temp = Mat.M[i][2]; Mat.M[i][2] = Mat.M[j][2]; Mat.M[j][2] = temp; @@ -1961,10 +1708,8 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix break; } } - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) { Mat.M[k][2] ^= Mat.M[i][2]; Mat.M[k][3] ^= Mat.M[i][3]; @@ -1976,14 +1721,10 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } - for (i = 192; i < 256; i++) // diagonal = 1? - { - if ((Mat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (i = 192; i < 256; i++) { /* diagonal = 1? 
*/ + if ((Mat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) { + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { Mat.M[j][3] ^= Mat.M[i][3]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -1992,13 +1733,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix (*Mat_inv).M[j][3] ^= (*Mat_inv).M[i][3]; } } - } - else // swap to find 1 - { - for (j = i + 1; j < 256; j++) - { - if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + } else { /* swap to find 1 */ + for (j = i + 1; j < 256; j++) { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { temp = Mat.M[i][3]; Mat.M[i][3] = Mat.M[j][3]; Mat.M[j][3] = temp; @@ -2021,10 +1758,8 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix break; } } - for (k = i + 1; k < 256; k++) - { - if ((Mat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (k = i + 1; k < 256; k++) { + if ((Mat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) { Mat.M[k][3] ^= Mat.M[i][3]; (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; @@ -2035,12 +1770,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } - for (i = 255; i >= 192; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (i = 255; i >= 192; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { Mat.M[j][3] ^= Mat.M[i][3]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -2050,12 +1782,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } - for (i = 191; i >= 128; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (i = 191; i >= 128; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { Mat.M[j][2] ^= Mat.M[i][2]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -2065,12 +1794,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 
256*256 inverse matrix } } } - for (i = 127; i >= 64; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 127; i >= 64; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { Mat.M[j][1] ^= Mat.M[i][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -2080,12 +1806,9 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } - for (i = 63; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((Mat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 63; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((Mat.M[j][0] & idM64[i]) == idM64[i]) { Mat.M[j][0] ^= Mat.M[i][0]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -2096,65 +1819,66 @@ void invsM256(M256 Mat, M256 *Mat_inv) // compute the 256*256 inverse matrix } } } -uint8_t affineU4(Aff4 aff, uint8_t arr) // 4bits affine transformation +uint8_t affineU4(Aff4 aff, uint8_t arr) /* 4bits affine transformation */ { V4 mul_vec, ans_vec; mul_vec.V = arr; - MatMulVecM4(aff.Mat, mul_vec, &ans_vec); // mul - return ans_vec.V ^ aff.Vec.V; // add + MatMulVecM4(aff.Mat, mul_vec, &ans_vec); /* mul */ + return ans_vec.V ^ aff.Vec.V; /* add */ } -uint8_t affineU8(Aff8 aff, uint8_t arr) // 8bits affine transformation +uint8_t affineU8(Aff8 aff, uint8_t arr) /* 8bits affine transformation */ { V8 mul_vec, ans_vec; mul_vec.V = arr; - MatMulVecM8(aff.Mat, mul_vec, &ans_vec); // mul - return ans_vec.V ^ aff.Vec.V; // add + MatMulVecM8(aff.Mat, mul_vec, &ans_vec); /* mul */ + return ans_vec.V ^ aff.Vec.V; /* add */ } -uint16_t affineU16(Aff16 aff, uint16_t arr) // 16bits affine transformation +uint16_t affineU16(Aff16 aff, uint16_t arr) /* 16bits affine transformation */ { V16 mul_vec, ans_vec; mul_vec.V = arr; - MatMulVecM16(aff.Mat, mul_vec, &ans_vec); // mul - return ans_vec.V ^ aff.Vec.V; // add + MatMulVecM16(aff.Mat, mul_vec, &ans_vec); /* mul */ + return ans_vec.V ^ aff.Vec.V; /* add */ } -uint32_t 
affineU32(Aff32 aff, uint32_t arr) // 32bits affine transformation +uint32_t affineU32(Aff32 aff, uint32_t arr) /* 32bits affine transformation */ { V32 mul_vec, ans_vec; mul_vec.V = arr; - MatMulVecM32(aff.Mat, mul_vec, &ans_vec); // mul - return ans_vec.V ^ aff.Vec.V; // add + MatMulVecM32(aff.Mat, mul_vec, &ans_vec); /* mul */ + return ans_vec.V ^ aff.Vec.V; /* add */ } -uint64_t affineU64(Aff64 aff, uint64_t arr) // 64bits affine transformation +uint64_t affineU64(Aff64 aff, uint64_t arr) /* 64bits affine transformation */ { V64 mul_vec, ans_vec; mul_vec.V = arr; - MatMulVecM64(aff.Mat, mul_vec, &ans_vec); // mul - return ans_vec.V ^ aff.Vec.V; // add + MatMulVecM64(aff.Mat, mul_vec, &ans_vec); /* mul */ + return ans_vec.V ^ aff.Vec.V; /* add */ } -void affineU128(Aff128 aff, uint64_t arr[], uint64_t ans[]) // 128bits affine transformation +/* 128bits affine transformation */ +void affineU128(Aff128 aff, uint64_t arr[], uint64_t ans[]) { V128 mul_vec, ans_vec; mul_vec.V[0] = arr[0]; mul_vec.V[1] = arr[1]; - MatMulVecM128(aff.Mat, mul_vec, &ans_vec); // mul - ans[0] = ans_vec.V[0] ^ aff.Vec.V[0]; // add + MatMulVecM128(aff.Mat, mul_vec, &ans_vec); /* mul */ + ans[0] = ans_vec.V[0] ^ aff.Vec.V[0]; /* add */ ans[1] = ans_vec.V[1] ^ aff.Vec.V[1]; } -int xorU4(uint8_t n) // 4bits internal xor +int xorU4(uint8_t n) /* 4bits internal xor */ { if (xor[n]) return 1; else return 0; } -int xorU8(uint8_t n) // uint8_t internal xor +int xorU8(uint8_t n) /* uint8_t internal xor */ { if (xor[n]) return 1; else return 0; } -int xorU16(uint16_t n) // uint16_t internal xor +int xorU16(uint16_t n) /* uint16_t internal xor */ { uint8_t temp = 0; uint8_t *u = (uint8_t *)&n; @@ -2164,7 +1888,7 @@ int xorU16(uint16_t n) // uint16_t internal xor else return 0; } -int xorU32(uint32_t n) // uint32_t internal xor +int xorU32(uint32_t n) /* uint32_t internal xor */ { uint16_t temp = 0; uint16_t *u = (uint16_t *)&n; @@ -2174,7 +1898,7 @@ int xorU32(uint32_t n) // uint32_t internal xor else 
return 0; } -int xorU64(uint64_t n) // uint64_t internal xor +int xorU64(uint64_t n) /* uint64_t internal xor */ { uint32_t temp = 0; uint32_t *u = (uint32_t *)&n; @@ -2184,7 +1908,7 @@ int xorU64(uint64_t n) // uint64_t internal xor else return 0; } -int xorU128(uint64_t n[]) // uint128_t internal xor +int xorU128(uint64_t n[]) /* uint128_t internal xor */ { uint64_t temp = 0; temp = n[0] ^ n[1]; @@ -2193,7 +1917,7 @@ int xorU128(uint64_t n[]) // uint128_t internal xor else return 0; } -int xorU256(uint64_t n[]) // uint256_t internal xor +int xorU256(uint64_t n[]) /* uint256_t internal xor */ { uint64_t temp = 0; temp = n[0] ^ n[1] ^ n[2] ^ n[3]; @@ -2202,63 +1926,61 @@ int xorU256(uint64_t n[]) // uint256_t internal xor else return 0; } -int HWU4(uint8_t n) // 4bits HW +int HWU4(uint8_t n) /* 4bits HW */ { return HW[n]; } -int HWU8(uint8_t n) // uint8_t HW +int HWU8(uint8_t n) /* uint8_t HW */ { return HW[n]; } -int HWU16(uint16_t n) // uint16_t HW +int HWU16(uint16_t n) /* uint16_t HW */ { uint8_t *u = (uint8_t *)&n; return HWU8(*u) + HWU8(*(u + 1)); } -int HWU32(uint32_t n) // uint32_t HW +int HWU32(uint32_t n) /* uint32_t HW */ { uint16_t *u = (uint16_t *)&n; return HWU16(*u) + HWU16(*(u + 1)); } -int HWU64(uint64_t n) // uint64_t HW +int HWU64(uint64_t n) /* uint64_t HW */ { uint32_t *u = (uint32_t *)&n; return HWU32(*u) + HWU32(*(u + 1)); } -int HWU128(uint64_t n[]) // uint128_t HW +int HWU128(uint64_t n[]) /* uint128_t HW */ { return HWU64(n[0]) + HWU64(n[1]); } -void printU8(uint8_t n) // printf uint8_t +void printU8(uint8_t n) /* printf uint8_t */ { printf("0x%x\n", n); } -void printU16(uint16_t n) // printf uint16_t +void printU16(uint16_t n) /* printf uint16_t */ { printf("0x%x\n", n); } -void printU32(uint32_t n) // printf uint32_t +void printU32(uint32_t n) /* printf uint32_t */ { printf("0x%x\n", n); } -void printU64(uint64_t n) // printf uint64_t +void printU64(uint64_t n) /* printf uint64_t */ { printf("0x%" PRIx64 "\n", n); } -void 
printU128(uint64_t n[]) // printf uint128_t +void printU128(uint64_t n[]) /* printf uint128_t */ { printf("0x%" PRIx64 " ", n[0]); printf("0x%" PRIx64 "\n", n[1]); } -void printbitM4(M4 Mat) // printf Matrix 4*4 in the form of bits +void printbitM4(M4 Mat) /* printf Matrix 4*4 in the form of bits */ { int i, j; uint8_t temp; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { temp = Mat.M[i]; - for (j = 0; j < 4; j++) - { + for (j = 0; j < 4; j++) { if (temp & 0x08) printf("%d ", 1); else @@ -2269,15 +1991,13 @@ void printbitM4(M4 Mat) // printf Matrix 4*4 in the form of bits } printf("\n"); } -void printbitM8(M8 Mat) // printf Matrix 8*8 in the form of bits +void printbitM8(M8 Mat) /* printf Matrix 8*8 in the form of bits */ { int i, j; uint8_t temp; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { temp = Mat.M[i]; - for (j = 0; j < 8; j++) - { + for (j = 0; j < 8; j++) { if (temp & 0x80) printf("%d ", 1); else @@ -2288,15 +2008,13 @@ void printbitM8(M8 Mat) // printf Matrix 8*8 in the form of bits } printf("\n"); } -void printbitM16(M16 Mat) // printf Matrix 16*16 in the form of bits +void printbitM16(M16 Mat) /* printf Matrix 16*16 in the form of bits */ { int i, j; uint16_t temp; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { temp = Mat.M[i]; - for (j = 0; j < 16; j++) - { + for (j = 0; j < 16; j++) { if (temp & 0x8000) printf("%d ", 1); else @@ -2307,15 +2025,13 @@ void printbitM16(M16 Mat) // printf Matrix 16*16 in the form of bits } printf("\n"); } -void printbitM32(M32 Mat) // printf Matrix 32*32 in the form of bits +void printbitM32(M32 Mat) /* printf Matrix 32*32 in the form of bits */ { int i, j; uint32_t temp; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { temp = Mat.M[i]; - for (j = 0; j < 32; j++) - { + for (j = 0; j < 32; j++) { if (temp & 0x80000000) printf("%d ", 1); else @@ -2326,15 +2042,13 @@ void printbitM32(M32 Mat) // printf Matrix 32*32 in the form of bits } printf("\n"); } -void printbitM64(M64 Mat) 
// printf Matrix 64*64 in the form of bits +void printbitM64(M64 Mat) /* printf Matrix 64*64 in the form of bits */ { int i, j; uint64_t temp; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { temp = Mat.M[i]; - for (j = 0; j < 64; j++) - { + for (j = 0; j < 64; j++) { if (temp & 0x8000000000000000) printf("%d ", 1); else @@ -2345,15 +2059,13 @@ void printbitM64(M64 Mat) // printf Matrix 64*64 in the form of bits } printf("\n"); } -void printbitM128(M128 Mat) // printf Matrix 128*128 in the form of bits +void printbitM128(M128 Mat) /* printf Matrix 128*128 in the form of bits */ { int i, j; uint64_t temp; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { temp = Mat.M[i][0]; - for (j = 0; j < 64; j++) - { + for (j = 0; j < 64; j++) { if (temp & 0x8000000000000000) printf("%d ", 1); else @@ -2361,8 +2073,7 @@ void printbitM128(M128 Mat) // printf Matrix 128*128 in the form of bits temp = temp << 1; } temp = Mat.M[i][1]; - for (j = 0; j < 64; j++) - { + for (j = 0; j < 64; j++) { if (temp & 0x8000000000000000) printf("%d ", 1); else @@ -2405,138 +2116,133 @@ void VecAddVecV256(V256 Vec1, V256 Vec2, V256 *Vec) (*Vec).V[2] = Vec1.V[2] ^ Vec2.V[2]; (*Vec).V[3] = Vec1.V[3] ^ Vec2.V[3]; } -uint8_t MatMulNumM4(M4 Mat, uint8_t n) // matrix * number -> number 4bits +uint8_t MatMulNumM4(M4 Mat, uint8_t n) /* matrix * number -> number 4bits */ { int i; uint8_t temp = 0; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { if (xorU4(Mat.M[i] & n & 0x0f)) temp ^= idM4[i]; } return temp; } -uint8_t MatMulNumM8(M8 Mat, uint8_t n) // matrix * number -> number 8bits +uint8_t MatMulNumM8(M8 Mat, uint8_t n) /* matrix * number -> number 8bits */ { int i; uint8_t temp = 0; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { if (xorU8(Mat.M[i] & n)) temp ^= idM8[i]; } return temp; } -uint16_t MatMulNumM16(M16 Mat, uint16_t n) // matrix * number -> number 16bits +/* matrix * number -> number 16bits */ +uint16_t MatMulNumM16(M16 Mat, uint16_t n) { int i; 
uint16_t temp = 0; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { if (xorU16(Mat.M[i] & n)) temp ^= idM16[i]; } return temp; } -uint32_t MatMulNumM32(M32 Mat, uint32_t n) // matrix * number -> number 32bits +/* matrix * number -> number 32bits */ +uint32_t MatMulNumM32(M32 Mat, uint32_t n) { int i; uint32_t temp = 0; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { if (xorU32(Mat.M[i] & n)) temp ^= idM32[i]; } return temp; } -uint64_t MatMulNumM64(M64 Mat, uint64_t n) // matrix * number -> number 64bits +/* matrix * number -> number 64bits */ +uint64_t MatMulNumM64(M64 Mat, uint64_t n) { int i; uint64_t temp = 0; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { if (xorU64(Mat.M[i] & n)) temp ^= idM64[i]; } return temp; } -void MatMulVecM4(M4 Mat, V4 Vec, V4 *ans) // matrix * vector -> vector 4*1 +void MatMulVecM4(M4 Mat, V4 Vec, V4 *ans) /* matrix * vector -> vector 4*1 */ { int i; initV4(ans); - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { if (xorU4(Mat.M[i] & Vec.V & 0x0f)) (*ans).V ^= idM4[i]; } } -void MatMulVecM8(M8 Mat, V8 Vec, V8 *ans) // matrix * vector -> vector 8*1 +void MatMulVecM8(M8 Mat, V8 Vec, V8 *ans) /* matrix * vector -> vector 8*1 */ { int i; initV8(ans); - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { if (xorU8(Mat.M[i] & Vec.V)) (*ans).V ^= idM8[i]; } } -void MatMulVecM16(M16 Mat, V16 Vec, V16 *ans) // matrix * vector -> vector 16*1 +/* matrix * vector -> vector 16*1 */ +void MatMulVecM16(M16 Mat, V16 Vec, V16 *ans) { int i; initV16(ans); - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { if (xorU16(Mat.M[i] & Vec.V)) (*ans).V ^= idM16[i]; } } -void MatMulVecM32(M32 Mat, V32 Vec, V32 *ans) // matrix * vector -> vector 32*1 +/* matrix * vector -> vector 32*1 */ +void MatMulVecM32(M32 Mat, V32 Vec, V32 *ans) { int i; initV32(ans); - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { if (xorU32(Mat.M[i] & Vec.V)) (*ans).V ^= idM32[i]; } } -void MatMulVecM64(M64 Mat, V64 Vec, 
V64 *ans) // matrix * vector -> vector 64*1 +/* matrix * vector -> vector 64*1 */ +void MatMulVecM64(M64 Mat, V64 Vec, V64 *ans) { int i; initV64(ans); - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { if (xorU64(Mat.M[i] & Vec.V)) (*ans).V ^= idM64[i]; } } -void MatMulVecM128(M128 Mat, V128 Vec, V128 *ans) // matrix * vector -> vector 128*1 +/* matrix * vector -> vector 128*1 */ +void MatMulVecM128(M128 Mat, V128 Vec, V128 *ans) { int i; initV128(ans); uint64_t temp[2]; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { temp[0] = Mat.M[i][0] & Vec.V[0]; temp[1] = Mat.M[i][1] & Vec.V[1]; if (xorU128(temp)) (*ans).V[0] ^= idM64[i]; } - for (i = 64; i < 128; i++) - { + for (i = 64; i < 128; i++) { temp[0] = Mat.M[i][0] & Vec.V[0]; temp[1] = Mat.M[i][1] & Vec.V[1]; if (xorU128(temp)) (*ans).V[1] ^= idM64[i - 64]; } } -void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) // matrix * vector -> vector 256*1 +/* matrix * vector -> vector 256*1 */ +void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) { int i; initV256(ans); uint64_t temp[4]; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { temp[0] = Mat.M[i][0] & Vec.V[0]; temp[1] = Mat.M[i][1] & Vec.V[1]; temp[2] = Mat.M[i][2] & Vec.V[2]; @@ -2544,8 +2250,7 @@ void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) // matrix * vector -> vector 2 if (xorU256(temp)) (*ans).V[0] ^= idM64[i]; } - for (i = 64; i < 128; i++) - { + for (i = 64; i < 128; i++) { temp[0] = Mat.M[i][0] & Vec.V[0]; temp[1] = Mat.M[i][1] & Vec.V[1]; temp[2] = Mat.M[i][2] & Vec.V[2]; @@ -2553,8 +2258,7 @@ void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) // matrix * vector -> vector 2 if (xorU256(temp)) (*ans).V[1] ^= idM64[i - 64]; } - for (i = 128; i < 192; i++) - { + for (i = 128; i < 192; i++) { temp[0] = Mat.M[i][0] & Vec.V[0]; temp[1] = Mat.M[i][1] & Vec.V[1]; temp[2] = Mat.M[i][2] & Vec.V[2]; @@ -2562,8 +2266,7 @@ void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) // matrix * vector -> vector 2 if (xorU256(temp)) (*ans).V[2] ^= 
idM64[i - 128]; } - for (i = 192; i < 256; i++) - { + for (i = 192; i < 256; i++) { temp[0] = Mat.M[i][0] & Vec.V[0]; temp[1] = Mat.M[i][1] & Vec.V[1]; temp[2] = Mat.M[i][2] & Vec.V[2]; @@ -2572,30 +2275,27 @@ void MatMulVecM256(M256 Mat, V256 Vec, V256 *ans) // matrix * vector -> vector 2 (*ans).V[3] ^= idM64[i - 192]; } } -void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and its inverse matrix +/* generate 4*4 invertible matrix and its inverse matrix */ +void genMatpairM4(M4 *Mat, M4 *Mat_inv) { int i, j, t, k; int p; M4 tempMat; M4 resultMat; uint8_t temp; - uint8_t trail[16][3]; // generate trail + uint8_t trail[16][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM4(Mat); identityM4(Mat_inv); randM4(&tempMat); copyM4(tempMat, &resultMat); - for (i = 0; i < 4; i++) // diagonal = 1? - { - if ((tempMat.M[i] & idM4[i]) == idM4[i]) - { - for (j = i + 1; j < 4; j++) - { - if ((tempMat.M[j] & idM4[i]) == idM4[i]) - { + for (i = 0; i < 4; i++) { /* diagonal = 1? 
*/ + if ((tempMat.M[i] & idM4[i]) == idM4[i]) { + for (j = i + 1; j < 4; j++) { + if ((tempMat.M[j] & idM4[i]) == idM4[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -2606,14 +2306,10 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 4; j++) - { - if ((tempMat.M[j] & idM4[i]) == idM4[i]) - { + for (j = i + 1; j < 4; j++) { + if ((tempMat.M[j] & idM4[i]) == idM4[i]) { temp = tempMat.M[i]; tempMat.M[i] = tempMat.M[j]; tempMat.M[j] = temp; @@ -2631,12 +2327,10 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 3) - { - p = i + 1 + cus_random() % (3 - i); // swap + if (i < 3) { + p = i + 1 + cus_random() % (3 - i); /* swap */ temp = tempMat.M[p]; tempMat.M[p] = tempMat.M[i]; tempMat.M[i] = temp; @@ -2647,10 +2341,8 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it trail[times][1] = p; trail[times][2] = i; times++; - for (t = i + 1; t < 4; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 4; t++) { + if (cus_random() % 2) { tempMat.M[t] ^= tempMat.M[i]; (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; trail[times][0] = 1; @@ -2660,13 +2352,9 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it } } } - } - else // can still contiune - { - for (k = i + 1; k < 4; k++) - { - if ((tempMat.M[k] & idM4[i]) == idM4[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 4; k++) { + if ((tempMat.M[k] & idM4[i]) == idM4[i]) { tempMat.M[k] ^= tempMat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; @@ -2680,14 +2368,10 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it } } } - if (!invertible) // not invertible - { - for (t = 
3; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM4[t]) == idM4[t]) - { + if (!invertible) { /* not invertible */ + for (t = 3; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM4[t]) == idM4[t]) { tempMat.M[j] ^= tempMat.M[t]; (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; trail[times][0] = 1; @@ -2698,28 +2382,19 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if (trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]]; (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; (*Mat).M[trail[j][2]] = temp; } } - } - else // invertible - { - for (i = 3; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM4[i]) == idM4[i]) - { + } else { /* invertible */ + for (i = 3; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM4[i]) == idM4[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -2729,30 +2404,27 @@ void genMatpairM4(M4 *Mat, M4 *Mat_inv) // generate 4*4 invertible matrix and it copyM4(resultMat, Mat); } } -void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and its inverse matrix +/* generate 8*8 invertible matrix and its inverse matrix */ +void genMatpairM8(M8 *Mat, M8 *Mat_inv) { int i, j, t, k; int p; M8 tempMat; M8 resultMat; uint8_t temp; - uint8_t trail[64][3]; // generate trail + uint8_t trail[64][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM8(Mat); identityM8(Mat_inv); randM8(&tempMat); copyM8(tempMat, &resultMat); - for (i = 0; i < 8; i++) // diagonal = 1? 
- { - if ((tempMat.M[i] & idM8[i]) == idM8[i]) - { - for (j = i + 1; j < 8; j++) - { - if ((tempMat.M[j] & idM8[i]) == idM8[i]) - { + for (i = 0; i < 8; i++) { /* diagonal = 1? */ + if ((tempMat.M[i] & idM8[i]) == idM8[i]) { + for (j = i + 1; j < 8; j++) { + if ((tempMat.M[j] & idM8[i]) == idM8[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -2763,14 +2435,10 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 8; j++) - { - if ((tempMat.M[j] & idM8[i]) == idM8[i]) - { + for (j = i + 1; j < 8; j++) { + if ((tempMat.M[j] & idM8[i]) == idM8[i]) { temp = tempMat.M[i]; tempMat.M[i] = tempMat.M[j]; tempMat.M[j] = temp; @@ -2788,12 +2456,10 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 7) - { - p = i + 1 + cus_random() % (7 - i); // swap + if (i < 7) { + p = i + 1 + cus_random() % (7 - i); /* swap */ temp = tempMat.M[p]; tempMat.M[p] = tempMat.M[i]; tempMat.M[i] = temp; @@ -2804,10 +2470,8 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it trail[times][1] = p; trail[times][2] = i; times++; - for (t = i + 1; t < 8; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 8; t++) { + if (cus_random() % 2) { tempMat.M[t] ^= tempMat.M[i]; (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; trail[times][0] = 1; @@ -2817,13 +2481,9 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it } } } - } - else // can still contiune - { - for (k = i + 1; k < 8; k++) - { - if ((tempMat.M[k] & idM8[i]) == idM8[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 8; k++) { + if ((tempMat.M[k] & idM8[i]) == idM8[i]) { tempMat.M[k] ^= tempMat.M[i]; (*Mat_inv).M[k] ^= 
(*Mat_inv).M[i]; @@ -2837,14 +2497,10 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it } } } - if (!invertible) // not invertible - { - for (t = 7; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM8[t]) == idM8[t]) - { + if (!invertible) { /* not invertible */ + for (t = 7; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM8[t]) == idM8[t]) { tempMat.M[j] ^= tempMat.M[t]; (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; trail[times][0] = 1; @@ -2855,28 +2511,19 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if (trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]]; (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; (*Mat).M[trail[j][2]] = temp; } } - } - else // invertible - { - for (i = 7; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM8[i]) == idM8[i]) - { + } else { /* invertible */ + for (i = 7; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM8[i]) == idM8[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -2886,30 +2533,27 @@ void genMatpairM8(M8 *Mat, M8 *Mat_inv) // generate 8*8 invertible matrix and it copyM8(resultMat, Mat); } } -void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix and its inverse matrix +/* generate 16*16 invertible matrix and its inverse matrix */ +void genMatpairM16(M16 *Mat, M16 *Mat_inv) { int i, j, t, k; int p; M16 tempMat; M16 resultMat; uint16_t temp; - uint8_t trail[256][3]; // generate trail + uint8_t trail[256][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* 
InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM16(Mat); identityM16(Mat_inv); randM16(&tempMat); copyM16(tempMat, &resultMat); - for (i = 0; i < 16; i++) // diagonal = 1? - { - if ((tempMat.M[i] & idM16[i]) == idM16[i]) - { - for (j = i + 1; j < 16; j++) - { - if ((tempMat.M[j] & idM16[i]) == idM16[i]) - { + for (i = 0; i < 16; i++) { /* diagonal = 1? */ + if ((tempMat.M[i] & idM16[i]) == idM16[i]) { + for (j = i + 1; j < 16; j++) { + if ((tempMat.M[j] & idM16[i]) == idM16[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -2920,14 +2564,10 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 16; j++) - { - if ((tempMat.M[j] & idM16[i]) == idM16[i]) - { + for (j = i + 1; j < 16; j++) { + if ((tempMat.M[j] & idM16[i]) == idM16[i]) { temp = tempMat.M[i]; tempMat.M[i] = tempMat.M[j]; tempMat.M[j] = temp; @@ -2945,12 +2585,10 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 15) - { - p = i + 1 + cus_random() % (15 - i); // swap + if (i < 15) { + p = i + 1 + cus_random() % (15 - i); /* swap */ temp = tempMat.M[p]; tempMat.M[p] = tempMat.M[i]; tempMat.M[i] = temp; @@ -2961,10 +2599,8 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a trail[times][1] = p; trail[times][2] = i; times++; - for (t = i + 1; t < 16; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 16; t++) { + if (cus_random() % 2) { tempMat.M[t] ^= tempMat.M[i]; (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; trail[times][0] = 1; @@ -2974,13 +2610,9 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a } } } - } - else // can still contiune - { - for (k = i + 1; k < 16; k++) - 
{ - if ((tempMat.M[k] & idM16[i]) == idM16[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 16; k++) { + if ((tempMat.M[k] & idM16[i]) == idM16[i]) { tempMat.M[k] ^= tempMat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; @@ -2994,14 +2626,10 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a } } } - if (!invertible) // not invertible - { - for (t = 15; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM16[t]) == idM16[t]) - { + if (!invertible) { /* not invertible */ + for (t = 15; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM16[t]) == idM16[t]) { tempMat.M[j] ^= tempMat.M[t]; (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; trail[times][0] = 1; @@ -3012,28 +2640,19 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if (trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]]; (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; (*Mat).M[trail[j][2]] = temp; } } - } - else // invertible - { - for (i = 15; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM16[i]) == idM16[i]) - { + } else { /* invertible */ + for (i = 15; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM16[i]) == idM16[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -3043,30 +2662,27 @@ void genMatpairM16(M16 *Mat, M16 *Mat_inv) // generate 16*16 invertible matrix a copyM16(resultMat, Mat); } } -void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix and its inverse matrix +/* generate 32*32 invertible matrix and its inverse matrix */ +void genMatpairM32(M32 *Mat, M32 *Mat_inv) { int i, j, t, k; int p; M32 tempMat; M32 resultMat; 
uint32_t temp; - uint8_t trail[1024][3]; // generate trail + uint8_t trail[1024][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM32(Mat); identityM32(Mat_inv); randM32(&tempMat); copyM32(tempMat, &resultMat); - for (i = 0; i < 32; i++) // diagonal = 1? - { - if ((tempMat.M[i] & idM32[i]) == idM32[i]) - { - for (j = i + 1; j < 32; j++) - { - if ((tempMat.M[j] & idM32[i]) == idM32[i]) - { + for (i = 0; i < 32; i++) { /* diagonal = 1? */ + if ((tempMat.M[i] & idM32[i]) == idM32[i]) { + for (j = i + 1; j < 32; j++) { + if ((tempMat.M[j] & idM32[i]) == idM32[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -3077,14 +2693,10 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 32; j++) - { - if ((tempMat.M[j] & idM32[i]) == idM32[i]) - { + for (j = i + 1; j < 32; j++) { + if ((tempMat.M[j] & idM32[i]) == idM32[i]) { temp = tempMat.M[i]; tempMat.M[i] = tempMat.M[j]; tempMat.M[j] = temp; @@ -3102,12 +2714,10 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 31) - { - p = i + 1 + cus_random() % (31 - i); // swap + if (i < 31) { + p = i + 1 + cus_random() % (31 - i); /* swap */ temp = tempMat.M[p]; tempMat.M[p] = tempMat.M[i]; tempMat.M[i] = temp; @@ -3118,10 +2728,8 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a trail[times][1] = p; trail[times][2] = i; times++; - for (t = i + 1; t < 32; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 32; t++) { + if (cus_random() % 2) { tempMat.M[t] ^= tempMat.M[i]; (*Mat_inv).M[t] 
^= (*Mat_inv).M[i]; trail[times][0] = 1; @@ -3131,13 +2739,9 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a } } } - } - else // can still contiune - { - for (k = i + 1; k < 32; k++) - { - if ((tempMat.M[k] & idM32[i]) == idM32[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 32; k++) { + if ((tempMat.M[k] & idM32[i]) == idM32[i]) { tempMat.M[k] ^= tempMat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; @@ -3151,14 +2755,10 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a } } } - if (!invertible) // not invertible - { - for (t = 31; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM32[t]) == idM32[t]) - { + if (!invertible) { /* not invertible */ + for (t = 31; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM32[t]) == idM32[t]) { tempMat.M[j] ^= tempMat.M[t]; (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; trail[times][0] = 1; @@ -3169,28 +2769,19 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if (trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]]; (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; (*Mat).M[trail[j][2]] = temp; } } - } - else // invertible - { - for (i = 31; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM32[i]) == idM32[i]) - { + } else { /* invertible */ + for (i = 31; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM32[i]) == idM32[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -3200,30 +2791,27 @@ void genMatpairM32(M32 *Mat, M32 *Mat_inv) // generate 32*32 invertible matrix a copyM32(resultMat, Mat); } } -void genMatpairM64(M64 *Mat, M64 *Mat_inv) // 
generate 64*64 invertible matrix and its inverse matrix +/* generate 64*64 invertible matrix and its inverse matrix */ +void genMatpairM64(M64 *Mat, M64 *Mat_inv) { int i, j, t, k; int p; M64 tempMat; M64 resultMat; uint64_t temp; - uint8_t trail[4096][3]; // generate trail + uint8_t trail[4096][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM64(Mat); identityM64(Mat_inv); randM64(&tempMat); copyM64(tempMat, &resultMat); - for (i = 0; i < 64; i++) // diagonal = 1? - { - if ((tempMat.M[i] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 64; j++) - { - if ((tempMat.M[j] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { /* diagonal = 1? */ + if ((tempMat.M[i] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 64; j++) { + if ((tempMat.M[j] & idM64[i]) == idM64[i]) { tempMat.M[j] ^= tempMat.M[i]; (*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -3234,14 +2822,10 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 64; j++) - { - if ((tempMat.M[j] & idM64[i]) == idM64[i]) - { + for (j = i + 1; j < 64; j++) { + if ((tempMat.M[j] & idM64[i]) == idM64[i]) { temp = tempMat.M[i]; tempMat.M[i] = tempMat.M[j]; tempMat.M[j] = temp; @@ -3259,12 +2843,10 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 63) - { - p = i + 1 + cus_random() % (63 - i); // swap + if (i < 63) { + p = i + 1 + cus_random() % (63 - i); /* swap */ temp = tempMat.M[p]; tempMat.M[p] = tempMat.M[i]; tempMat.M[i] = temp; @@ -3275,10 +2857,8 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a 
trail[times][1] = p; trail[times][2] = i; times++; - for (t = i + 1; t < 64; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 64; t++) { + if (cus_random() % 2) { tempMat.M[t] ^= tempMat.M[i]; (*Mat_inv).M[t] ^= (*Mat_inv).M[i]; trail[times][0] = 1; @@ -3288,13 +2868,9 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a } } } - } - else // can still contiune - { - for (k = i + 1; k < 64; k++) - { - if ((tempMat.M[k] & idM64[i]) == idM64[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 64; k++) { + if ((tempMat.M[k] & idM64[i]) == idM64[i]) { tempMat.M[k] ^= tempMat.M[i]; (*Mat_inv).M[k] ^= (*Mat_inv).M[i]; @@ -3308,14 +2884,10 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a } } } - if (!invertible) // not invertible - { - for (t = 63; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM64[t]) == idM64[t]) - { + if (!invertible) { /* not invertible */ + for (t = 63; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM64[t]) == idM64[t]) { tempMat.M[j] ^= tempMat.M[t]; (*Mat_inv).M[j] ^= (*Mat_inv).M[t]; trail[times][0] = 1; @@ -3326,28 +2898,19 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if (trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]] ^= (*Mat).M[trail[j][2]]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]]; (*Mat).M[trail[j][1]] = (*Mat).M[trail[j][2]]; (*Mat).M[trail[j][2]] = temp; } } - } - else // invertible - { - for (i = 63; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j] & idM64[i]) == idM64[i]) - { + } else { /* invertible */ + for (i = 63; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j] & idM64[i]) == idM64[i]) { tempMat.M[j] ^= tempMat.M[i]; 
(*Mat_inv).M[j] ^= (*Mat_inv).M[i]; @@ -3357,30 +2920,27 @@ void genMatpairM64(M64 *Mat, M64 *Mat_inv) // generate 64*64 invertible matrix a copyM64(resultMat, Mat); } } -void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible matrix and its inverse matrix +/* generate 128*128 invertible matrix and its inverse matrix */ +void genMatpairM128(M128 *Mat, M128 *Mat_inv) { int i, j, t, k; int p; M128 tempMat; M128 resultMat; uint64_t temp; - uint8_t trail[16384][3]; // generate trail + uint8_t trail[16384][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM128(Mat); identityM128(Mat_inv); randM128(&tempMat); copyM128(tempMat, &resultMat); - for (i = 0; i < 64; i++) // diagonal = 1? - { - if ((tempMat.M[i][0] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 128; j++) - { - if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { /* diagonal = 1? 
*/ + if ((tempMat.M[i][0] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 128; j++) { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) { tempMat.M[j][0] ^= tempMat.M[i][0]; tempMat.M[j][1] ^= tempMat.M[i][1]; @@ -3393,14 +2953,10 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 128; j++) - { - if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) - { + for (j = i + 1; j < 128; j++) { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) { temp = tempMat.M[i][0]; tempMat.M[i][0] = tempMat.M[j][0]; tempMat.M[j][0] = temp; @@ -3426,10 +2982,9 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - p = i + 1 + cus_random() % (127 - i); // swap + p = i + 1 + cus_random() % (127 - i); /* swap */ temp = tempMat.M[p][0]; tempMat.M[p][0] = tempMat.M[i][0]; @@ -3452,10 +3007,8 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat trail[times][2] = i; times++; - for (t = i + 1; t < 128; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 128; t++) { + if (cus_random() % 2) { tempMat.M[t][0] ^= tempMat.M[i][0]; tempMat.M[t][1] ^= tempMat.M[i][1]; @@ -3467,13 +3020,9 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat times++; } } - } - else // can still contiune - { - for (k = i + 1; k < 128; k++) - { - if ((tempMat.M[k][0] & idM64[i]) == idM64[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 128; k++) { + if ((tempMat.M[k][0] & idM64[i]) == idM64[i]) { tempMat.M[k][0] ^= tempMat.M[i][0]; tempMat.M[k][1] ^= tempMat.M[i][1]; @@ -3489,14 +3038,10 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat } } } - for (i = 64; i < 128; i++) // diagonal = 1? 
- { - if ((tempMat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) - { - for (j = i + 1; j < 128; j++) - { - if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 64; i < 128; i++) { /* diagonal = 1? */ + if ((tempMat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) { + for (j = i + 1; j < 128; j++) { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { tempMat.M[j][1] ^= tempMat.M[i][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -3508,14 +3053,10 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 128; j++) - { - if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (j = i + 1; j < 128; j++) { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { temp = tempMat.M[i][1]; tempMat.M[i][1] = tempMat.M[j][1]; tempMat.M[j][1] = temp; @@ -3537,12 +3078,10 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 127) - { - p = i + 1 + cus_random() % (127 - i); // swap + if (i < 127) { + p = i + 1 + cus_random() % (127 - i); /* swap */ temp = tempMat.M[p][1]; tempMat.M[p][1] = tempMat.M[i][1]; @@ -3561,10 +3100,8 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat trail[times][2] = i; times++; - for (t = i + 1; t < 128; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 128; t++) { + if (cus_random() % 2) { tempMat.M[t][1] ^= tempMat.M[i][1]; (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; @@ -3576,13 +3113,9 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat } } } - } - else // can still contiune - { - for (k = i + 1; k < 128; k++) - { - if ((tempMat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) - { + } else { /* can still contiune */ + for (k = i + 1; 
k < 128; k++) { + if ((tempMat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) { tempMat.M[k][1] ^= tempMat.M[i][1]; (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; @@ -3597,14 +3130,10 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat } } } - if (!invertible) // not invertible - { - for (t = 127; t >= 64; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j][1] & idM64[t - 64]) == idM64[t - 64]) - { + if (!invertible) { /* not invertible */ + for (t = 127; t >= 64; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j][1] & idM64[t - 64]) == idM64[t - 64]) { tempMat.M[j][1] ^= tempMat.M[t][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; @@ -3617,12 +3146,9 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat } } } - for (t = 63; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j][0] & idM64[t]) == idM64[t]) - { + for (t = 63; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j][0] & idM64[t]) == idM64[t]) { tempMat.M[j][0] ^= tempMat.M[t][0]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; @@ -3636,15 +3162,11 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if (trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]][0] ^= (*Mat).M[trail[j][2]][0]; (*Mat).M[trail[j][1]][1] ^= (*Mat).M[trail[j][2]][1]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]][0]; (*Mat).M[trail[j][1]][0] = (*Mat).M[trail[j][2]][0]; (*Mat).M[trail[j][2]][0] = temp; @@ -3654,15 +3176,10 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat (*Mat).M[trail[j][2]][1] = temp; } } - } - else // invertible - { - for (i = 127; i >= 64; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + } else { /* 
invertible */ + for (i = 127; i >= 64; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { tempMat.M[j][1] ^= tempMat.M[i][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -3670,12 +3187,9 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat } } } - for (i = 63; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 63; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) { tempMat.M[j][0] ^= tempMat.M[i][0]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -3686,30 +3200,27 @@ void genMatpairM128(M128 *Mat, M128 *Mat_inv) // generate 128*128 invertible mat copyM128(resultMat, Mat); } } -void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible matrix and its inverse matrix +/* generate 256*256 invertible matrix and its inverse matrix */ +void genMatpairM256(M256 *Mat, M256 *Mat_inv) { int i, j, t, k; int p; M256 tempMat; M256 resultMat; uint64_t temp; - uint8_t trail[65536][3]; // generate trail + uint8_t trail[65536][3]; /* generate trail */ int flag = 0; int times = 0; int invertible = 1; - // InitRandom((randseed++) ^ ((unsigned int)time(NULL))); + /* InitRandom((randseed++) ^ ((unsigned int)time(NULL))); */ identityM256(Mat); identityM256(Mat_inv); randM256(&tempMat); copyM256(tempMat, &resultMat); - for (i = 0; i < 64; i++) // diagonal = 1? - { - if ((tempMat.M[i][0] & idM64[i]) == idM64[i]) - { - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 0; i < 64; i++) { /* diagonal = 1? 
*/ + if ((tempMat.M[i][0] & idM64[i]) == idM64[i]) { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) { tempMat.M[j][0] ^= tempMat.M[i][0]; tempMat.M[j][1] ^= tempMat.M[i][1]; tempMat.M[j][2] ^= tempMat.M[i][2]; @@ -3726,14 +3237,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) - { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) { temp = tempMat.M[i][0]; tempMat.M[i][0] = tempMat.M[j][0]; tempMat.M[j][0] = temp; @@ -3775,10 +3282,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - p = i + 1 + cus_random() % (255 - i); // swap + p = i + 1 + cus_random() % (255 - i); /* swap */ temp = tempMat.M[p][0]; tempMat.M[p][0] = tempMat.M[i][0]; @@ -3817,10 +3323,8 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat trail[times][2] = i; times++; - for (t = i + 1; t < 256; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 256; t++) { + if (cus_random() % 2) { tempMat.M[t][0] ^= tempMat.M[i][0]; tempMat.M[t][1] ^= tempMat.M[i][1]; tempMat.M[t][2] ^= tempMat.M[i][2]; @@ -3836,13 +3340,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat times++; } } - } - else // can still contiune - { - for (k = i + 1; k < 256; k++) - { - if ((tempMat.M[k][0] & idM64[i]) == idM64[i]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 256; k++) { + if ((tempMat.M[k][0] & idM64[i]) == idM64[i]) { tempMat.M[k][0] ^= tempMat.M[i][0]; tempMat.M[k][1] ^= tempMat.M[i][1]; tempMat.M[k][2] ^= tempMat.M[i][2]; @@ -3862,14 +3362,10 @@ void genMatpairM256(M256 *Mat, 
M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (i = 64; i < 128; i++) // diagonal = 1? - { - if ((tempMat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) - { - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 64; i < 128; i++) { /* diagonal = 1? */ + if ((tempMat.M[i][1] & idM64[i - 64]) == idM64[i - 64]) { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { tempMat.M[j][1] ^= tempMat.M[i][1]; tempMat.M[j][2] ^= tempMat.M[i][2]; tempMat.M[j][3] ^= tempMat.M[i][3]; @@ -3885,14 +3381,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { temp = tempMat.M[i][1]; tempMat.M[i][1] = tempMat.M[j][1]; tempMat.M[j][1] = temp; @@ -3930,12 +3422,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - // if(i < 127) - { - p = i + 1 + cus_random() % (255 - i); // swap + /* if(i < 127) */ { + p = i + 1 + cus_random() % (255 - i); /* swap */ temp = tempMat.M[p][1]; tempMat.M[p][1] = tempMat.M[i][1]; @@ -3970,10 +3460,8 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat trail[times][2] = i; times++; - for (t = i + 1; t < 256; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 256; t++) { + if (cus_random() % 2) { tempMat.M[t][1] ^= tempMat.M[i][1]; tempMat.M[t][2] ^= tempMat.M[i][2]; tempMat.M[t][3] ^= tempMat.M[i][3]; @@ -3989,13 +3477,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - } - else // can 
still contiune - { - for (k = i + 1; k < 256; k++) - { - if ((tempMat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 256; k++) { + if ((tempMat.M[k][1] & idM64[i - 64]) == idM64[i - 64]) { tempMat.M[k][1] ^= tempMat.M[i][1]; tempMat.M[k][2] ^= tempMat.M[i][2]; tempMat.M[k][3] ^= tempMat.M[i][3]; @@ -4014,14 +3498,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (i = 128; i < 192; i++) // diagonal = 1? - { - if ((tempMat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) - { - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (i = 128; i < 192; i++) { /* diagonal = 1? */ + if ((tempMat.M[i][2] & idM64[i - 128]) == idM64[i - 128]) { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { tempMat.M[j][2] ^= tempMat.M[i][2]; tempMat.M[j][3] ^= tempMat.M[i][3]; @@ -4036,14 +3516,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { temp = tempMat.M[i][2]; tempMat.M[i][2] = tempMat.M[j][2]; tempMat.M[j][2] = temp; @@ -4077,12 +3553,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - // if(i < 127) - { - p = i + 1 + cus_random() % (255 - i); // swap + /* if(i < 127) */ { + p = i + 1 + cus_random() % (255 - i); /* swap */ temp = tempMat.M[p][2]; tempMat.M[p][2] = tempMat.M[i][2]; @@ -4113,10 +3587,8 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat 
trail[times][2] = i; times++; - for (t = i + 1; t < 256; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 256; t++) { + if (cus_random() % 2) { tempMat.M[t][2] ^= tempMat.M[i][2]; tempMat.M[t][3] ^= tempMat.M[i][3]; @@ -4131,13 +3603,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - } - else // can still contiune - { - for (k = i + 1; k < 256; k++) - { - if ((tempMat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 256; k++) { + if ((tempMat.M[k][2] & idM64[i - 128]) == idM64[i - 128]) { tempMat.M[k][2] ^= tempMat.M[i][2]; tempMat.M[k][3] ^= tempMat.M[i][3]; @@ -4155,14 +3623,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (i = 192; i < 256; i++) // diagonal = 1? - { - if ((tempMat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) - { - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (i = 192; i < 256; i++) { /* diagonal = 1? 
*/ + if ((tempMat.M[i][3] & idM64[i - 192]) == idM64[i - 192]) { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { tempMat.M[j][3] ^= tempMat.M[i][3]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -4176,14 +3640,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat times++; } } - } - else // swap to find 1 - { + } else { /* swap to find 1 */ flag = 1; - for (j = i + 1; j < 256; j++) - { - if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + for (j = i + 1; j < 256; j++) { + if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { temp = tempMat.M[i][3]; tempMat.M[i][3] = tempMat.M[j][3]; tempMat.M[j][3] = temp; @@ -4213,12 +3673,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat break; } } - if (flag) // can not find 1 which means not invertible - { + if (flag) { /* can not find 1 which means not invertible */ invertible = 0; - if (i < 255) - { - p = i + 1 + cus_random() % (255 - i); // swap + if (i < 255) { + p = i + 1 + cus_random() % (255 - i); /* swap */ temp = tempMat.M[p][3]; tempMat.M[p][3] = tempMat.M[i][3]; @@ -4245,10 +3703,8 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat trail[times][2] = i; times++; - for (t = i + 1; t < 256; t++) - { - if (cus_random() % 2) - { + for (t = i + 1; t < 256; t++) { + if (cus_random() % 2) { tempMat.M[t][3] ^= tempMat.M[i][3]; (*Mat_inv).M[t][0] ^= (*Mat_inv).M[i][0]; @@ -4262,13 +3718,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - } - else // can still contiune - { - for (k = i + 1; k < 256; k++) - { - if ((tempMat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) - { + } else { /* can still contiune */ + for (k = i + 1; k < 256; k++) { + if ((tempMat.M[k][3] & idM64[i - 192]) == idM64[i - 192]) { tempMat.M[k][3] ^= tempMat.M[i][3]; (*Mat_inv).M[k][0] ^= (*Mat_inv).M[i][0]; @@ -4285,14 +3737,10 @@ void 
genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - if (!invertible) // not invertible - { - for (t = 255; t >= 192; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j][3] & idM64[t - 192]) == idM64[t - 192]) - { + if (!invertible) { /* not invertible */ + for (t = 255; t >= 192; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j][3] & idM64[t - 192]) == idM64[t - 192]) { tempMat.M[j][3] ^= tempMat.M[t][3]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; @@ -4307,12 +3755,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (t = 191; t >= 128; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j][2] & idM64[t - 128]) == idM64[t - 128]) - { + for (t = 191; t >= 128; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j][2] & idM64[t - 128]) == idM64[t - 128]) { tempMat.M[j][2] ^= tempMat.M[t][2]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; @@ -4327,12 +3772,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (t = 127; t >= 64; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j][1] & idM64[t - 64]) == idM64[t - 64]) - { + for (t = 127; t >= 64; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j][1] & idM64[t - 64]) == idM64[t - 64]) { tempMat.M[j][1] ^= tempMat.M[t][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; @@ -4347,12 +3789,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (t = 63; t >= 0; t--) - { - for (j = t - 1; j >= 0; j--) - { - if ((tempMat.M[j][0] & idM64[t]) == idM64[t]) - { + for (t = 63; t >= 0; t--) { + for (j = t - 1; j >= 0; j--) { + if ((tempMat.M[j][0] & idM64[t]) == idM64[t]) { tempMat.M[j][0] ^= tempMat.M[t][0]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[t][0]; @@ -4368,17 +3807,13 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } - for (j = times - 1; j >= 0; j--) // generate inverse matrix - { - if 
(trail[j][0]) // add - { + for (j = times - 1; j >= 0; j--) { /* generate inverse matrix */ + if (trail[j][0]) { /* add */ (*Mat).M[trail[j][1]][0] ^= (*Mat).M[trail[j][2]][0]; (*Mat).M[trail[j][1]][1] ^= (*Mat).M[trail[j][2]][1]; (*Mat).M[trail[j][1]][2] ^= (*Mat).M[trail[j][2]][2]; (*Mat).M[trail[j][1]][3] ^= (*Mat).M[trail[j][2]][3]; - } - else // swap - { + } else { /* swap */ temp = (*Mat).M[trail[j][1]][0]; (*Mat).M[trail[j][1]][0] = (*Mat).M[trail[j][2]][0]; (*Mat).M[trail[j][2]][0] = temp; @@ -4396,15 +3831,10 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat (*Mat).M[trail[j][2]][3] = temp; } } - } - else // invertible - { - for (i = 255; i >= 192; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) - { + } else { /* invertible */ + for (i = 255; i >= 192; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j][3] & idM64[i - 192]) == idM64[i - 192]) { tempMat.M[j][3] ^= tempMat.M[i][3]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -4414,12 +3844,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (i = 191; i >= 128; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) - { + for (i = 191; i >= 128; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j][2] & idM64[i - 128]) == idM64[i - 128]) { tempMat.M[j][2] ^= tempMat.M[i][2]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -4429,12 +3856,9 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat } } } - for (i = 127; i >= 64; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) - { + for (i = 127; i >= 64; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j][1] & idM64[i - 64]) == idM64[i - 64]) { tempMat.M[j][1] ^= tempMat.M[i][1]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -4444,12 +3868,9 @@ void genMatpairM256(M256 *Mat, M256 
*Mat_inv) // generate 256*256 invertible mat } } } - for (i = 63; i >= 0; i--) - { - for (j = i - 1; j >= 0; j--) - { - if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) - { + for (i = 63; i >= 0; i--) { + for (j = i - 1; j >= 0; j--) { + if ((tempMat.M[j][0] & idM64[i]) == idM64[i]) { tempMat.M[j][0] ^= tempMat.M[i][0]; (*Mat_inv).M[j][0] ^= (*Mat_inv).M[i][0]; @@ -4462,74 +3883,76 @@ void genMatpairM256(M256 *Mat, M256 *Mat_inv) // generate 256*256 invertible mat copyM256(resultMat, Mat); } } -void genaffinepairM4(Aff4 *aff, Aff4 *aff_inv) // generate a pair of affine +void genaffinepairM4(Aff4 *aff, Aff4 *aff_inv) /* generate a pair of affine */ { genMatpairM4(&(aff->Mat), &(aff_inv->Mat)); randV4(&(aff->Vec)); MatMulVecM4((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); } -void genaffinepairM8(Aff8 *aff, Aff8 *aff_inv) // generate a pair of affine +void genaffinepairM8(Aff8 *aff, Aff8 *aff_inv) /* generate a pair of affine */ { genMatpairM8(&(aff->Mat), &(aff_inv->Mat)); randV8(&(aff->Vec)); MatMulVecM8((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); } -void genaffinepairM16(Aff16 *aff, Aff16 *aff_inv) // generate a pair of affine +/* generate a pair of affine */ +void genaffinepairM16(Aff16 *aff, Aff16 *aff_inv) { genMatpairM16(&(aff->Mat), &(aff_inv->Mat)); randV16(&(aff->Vec)); MatMulVecM16((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); } -void genaffinepairM32(Aff32 *aff, Aff32 *aff_inv) // generate a pair of affine +/* generate a pair of affine */ +void genaffinepairM32(Aff32 *aff, Aff32 *aff_inv) { genMatpairM32(&(aff->Mat), &(aff_inv->Mat)); randV32(&(aff->Vec)); MatMulVecM32((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); } -void genaffinepairM64(Aff64 *aff, Aff64 *aff_inv) // generate a pair of affine +/* generate a pair of affine */ +void genaffinepairM64(Aff64 *aff, Aff64 *aff_inv) { genMatpairM64(&(aff->Mat), &(aff_inv->Mat)); randV64(&(aff->Vec)); MatMulVecM64((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); } -void genaffinepairM128(Aff128 *aff, Aff128 *aff_inv) 
// generate a pair of affine +/* generate a pair of affine */ +void genaffinepairM128(Aff128 *aff, Aff128 *aff_inv) { genMatpairM128(&(aff->Mat), &(aff_inv->Mat)); randV128(&(aff->Vec)); MatMulVecM128((*aff_inv).Mat, (*aff).Vec, &(aff_inv->Vec)); } -void MatrixcomM8to32(M8 m1, M8 m2, M8 m3, M8 m4, M32 *mat) // diagonal matrix concatenation, four 8*8 -> 32*32 +/* diagonal matrix concatenation, four 8*8 -> 32*32 */ +void MatrixcomM8to32(M8 m1, M8 m2, M8 m3, M8 m4, M32 *mat) { int i; int j = 0; uint8_t *m; initM32(mat); - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 3) = m1.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 2) = m2.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 1) = m3.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *m = m4.M[i]; j++; } } -void VectorcomV8to32(V8 v1, V8 v2, V8 v3, V8 v4, V32 *vec) // 4 vectors concatenation +/* 4 vectors concatenation */ +void VectorcomV8to32(V8 v1, V8 v2, V8 v3, V8 v4, V32 *vec) { uint8_t *v; v = (uint8_t *)&(*vec).V; @@ -4538,43 +3961,42 @@ void VectorcomV8to32(V8 v1, V8 v2, V8 v3, V8 v4, V32 *vec) // 4 vectors concaten *(v + 1) = v3.V; *v = v4.V; } -void affinecomM8to32(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff32 *aff) // diagonal affine concatenation, four 8*8 -> 32*32 +/* diagonal affine concatenation, four 8*8 -> 32*32 */ +void affinecomM8to32(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff32 *aff) { MatrixcomM8to32(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, &(aff->Mat)); VectorcomV8to32(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, &(aff->Vec)); } -void MatrixcomM16to64(M16 m1, M16 m2, M16 m3, M16 m4, M64 *mat) // diagonal matrix concatenation, four 16*16 -> 64*64 +/* diagonal matrix concatenation, four 16*16 -> 64*64 */ +void MatrixcomM16to64(M16 m1, M16 m2, M16 m3, M16 m4, M64 *mat) { int i; int j = 0; 
uint16_t *m; initM64(mat); - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j]; *(m + 3) = m1.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j]; *(m + 2) = m2.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j]; *(m + 1) = m3.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j]; *m = m4.M[i]; j++; } } -void VectorcomV16to64(V16 v1, V16 v2, V16 v3, V16 v4, V64 *vec) // 4 vectors concatenation +/* 4 vectors concatenation */ +void VectorcomV16to64(V16 v1, V16 v2, V16 v3, V16 v4, V64 *vec) { uint16_t *v; v = (uint16_t *)&(*vec).V; @@ -4583,67 +4005,65 @@ void VectorcomV16to64(V16 v1, V16 v2, V16 v3, V16 v4, V64 *vec) // 4 vectors con *(v + 1) = v3.V; *v = v4.V; } -void affinecomM16to64(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff64 *aff) // diagonal affine concatenation,four 16*16 -> 64*64 +/* diagonal affine concatenation,four 16*16 -> 64*64 */ +void affinecomM16to64(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, + Aff64 *aff) { MatrixcomM16to64(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, &(aff->Mat)); VectorcomV16to64(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, &(aff->Vec)); } -void MatrixcomM8to64(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M64 *mat) // diagonal matrix concatenation,four 8*8 -> 64*64 +/* diagonal matrix concatenation,four 8*8 -> 64*64 */ +void MatrixcomM8to64(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, + M64 *mat) { int i; int j = 0; uint8_t *m; initM64(mat); - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 7) = m1.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 6) = m2.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 5) = m3.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 
8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 4) = m4.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 3) = m5.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 2) = m6.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *(m + 1) = m7.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j]; *m = m8.M[i]; j++; } } -void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V64 *vec) // 8 vectors concatenation +/* 8 vectors concatenation */ +void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, + V64 *vec) { uint8_t *v; v = (uint8_t *)&(*vec).V; @@ -4656,43 +4076,45 @@ void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V64 *(v + 1) = v7.V; *v = v8.V; } -void affinecomM8to64(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff64 *aff) // diagonal affine concatenation, four 8*8 -> 64*64 +/* diagonal affine concatenation, four 8*8 -> 64*64 */ +void affinecomM8to64(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, + Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff64 *aff) { - MatrixcomM8to64(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, aff7.Mat, aff8.Mat, &(aff->Mat)); - VectorcomV8to64(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, aff7.Vec, aff8.Vec, &(aff->Vec)); + MatrixcomM8to64(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, + aff7.Mat, aff8.Mat, &(aff->Mat)); + VectorcomV8to64(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, + aff7.Vec, aff8.Vec, &(aff->Vec)); } -void MatrixcomM32to128(M32 m1, M32 m2, M32 m3, M32 m4, M128 *mat) // diagonal matrix concatenation, four 32*32 -> 128*128 +/* diagonal matrix concatenation, four 32*32 -> 128*128 */ +void MatrixcomM32to128(M32 m1, M32 m2, M32 m3, M32 m4, M128 *mat) { int i; 
int j = 0; uint32_t *m; initM128(mat); - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { m = (uint32_t *)&(*mat).M[j][0]; *(m + 1) = m1.M[i]; j++; } - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { m = (uint32_t *)&(*mat).M[j][0]; *m = m2.M[i]; j++; } - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { m = (uint32_t *)&(*mat).M[j][1]; *(m + 1) = m3.M[i]; j++; } - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { m = (uint32_t *)&(*mat).M[j][1]; *m = m4.M[i]; j++; } } -void VectorcomV32to128(V32 v1, V32 v2, V32 v3, V32 v4, V128 *vec) // 4 vectors concatenation +/* 4 vectors concatenation */ +void VectorcomV32to128(V32 v1, V32 v2, V32 v3, V32 v4, V128 *vec) { uint32_t *v; v = (uint32_t *)&(*vec).V[0]; @@ -4702,115 +4124,107 @@ void VectorcomV32to128(V32 v1, V32 v2, V32 v3, V32 v4, V128 *vec) // 4 vectors c *(v + 1) = v3.V; *v = v4.V; } -void affinecomM32to128(Aff32 aff1, Aff32 aff2, Aff32 aff3, Aff32 aff4, Aff128 *aff) // diagonal affine concatenation, four 32*32 -> 128*128 +/* diagonal affine concatenation, four 32*32 -> 128*128 */ +void affinecomM32to128(Aff32 aff1, Aff32 aff2, Aff32 aff3, Aff32 aff4, + Aff128 *aff) { MatrixcomM32to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, &(aff->Mat)); VectorcomV32to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, &(aff->Vec)); } -void MatrixcomM8to128(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M8 m9, M8 m10, M8 m11, M8 m12, M8 m13, M8 m14, M8 m15, M8 m16, M128 *mat) // diagonal matrix concatenation, 16 8*8 -> 128*128 +/* diagonal matrix concatenation, 16 8*8 -> 128*128 */ +void MatrixcomM8to128(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, + M8 m9, M8 m10, M8 m11, M8 m12, M8 m13, M8 m14, M8 m15, + M8 m16, M128 *mat) { int i; int j = 0; uint8_t *m; initM128(mat); - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 7) = m1.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 6) 
= m2.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 5) = m3.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 4) = m4.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 3) = m5.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 2) = m6.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *(m + 1) = m7.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][0]; *m = m8.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 7) = m9.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 6) = m10.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 5) = m11.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 4) = m12.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 3) = m13.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 2) = m14.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *(m + 1) = m15.M[i]; j++; } - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { m = (uint8_t *)&(*mat).M[j][1]; *m = m16.M[i]; j++; } } -void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V8 v9, V8 v10, V8 v11, V8 v12, V8 v13, V8 v14, V8 v15, V8 v16, V128 *vec) // 16 vectors concatenation +/* 16 vectors concatenation */ +void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, + V8 v9, V8 v10, V8 v11, V8 v12, V8 v13, V8 v14, V8 
v15, + V8 v16, V128 *vec) { uint8_t *v; v = (uint8_t *)&(*vec).V[0]; @@ -4832,67 +4246,73 @@ void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V8 *(v + 1) = v15.V; *v = v16.V; } -void affinecomM8to128(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff8 aff9, Aff8 aff10, Aff8 aff11, Aff8 aff12, Aff8 aff13, Aff8 aff14, Aff8 aff15, Aff8 aff16, Aff128 *aff) // diagonal affine concatenation, 16 8*8 -> 128*128 -{ - MatrixcomM8to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, aff7.Mat, aff8.Mat, aff9.Mat, aff10.Mat, aff11.Mat, aff12.Mat, aff13.Mat, aff14.Mat, aff15.Mat, aff16.Mat, &(aff->Mat)); - VectorcomV8to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, aff7.Vec, aff8.Vec, aff9.Vec, aff10.Vec, aff11.Vec, aff12.Vec, aff13.Vec, aff14.Vec, aff15.Vec, aff16.Vec, &(aff->Vec)); -} -void MatrixcomM16to128(M16 m1, M16 m2, M16 m3, M16 m4, M16 m5, M16 m6, M16 m7, M16 m8, M128 *mat) // diagonal matrix concatenation, 8 16*16 -> 128*128 +/* diagonal affine concatenation, 16 8*8 -> 128*128 */ +void affinecomM8to128(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, + Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff8 aff9, Aff8 aff10, + Aff8 aff11, Aff8 aff12, Aff8 aff13, Aff8 aff14, + Aff8 aff15, Aff8 aff16, Aff128 *aff) +{ + MatrixcomM8to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, + aff7.Mat, aff8.Mat, aff9.Mat, aff10.Mat, aff11.Mat, + aff12.Mat, aff13.Mat, aff14.Mat, aff15.Mat, aff16.Mat, + &(aff->Mat)); + VectorcomV8to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, + aff7.Vec, aff8.Vec, aff9.Vec, aff10.Vec, aff11.Vec, + aff12.Vec, aff13.Vec, aff14.Vec, aff15.Vec, aff16.Vec, + &(aff->Vec)); +} +/* diagonal matrix concatenation, 8 16*16 -> 128*128 */ +void MatrixcomM16to128(M16 m1, M16 m2, M16 m3, M16 m4, M16 m5, M16 m6, M16 m7, + M16 m8, M128 *mat) { int i; int j = 0; uint16_t *m; initM128(mat); - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { 
m = (uint16_t *)&(*mat).M[j][0]; *(m + 3) = m1.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][0]; *(m + 2) = m2.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][0]; *(m + 1) = m3.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][0]; *m = m4.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][1]; *(m + 3) = m5.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][1]; *(m + 2) = m6.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][1]; *(m + 1) = m7.M[i]; j++; } - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { m = (uint16_t *)&(*mat).M[j][1]; *m = m8.M[i]; j++; } } -void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, V16 v8, V128 *vec) // 8 vectors concatenation +/* 8 vectors concatenation */ +void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, + V16 v8, V128 *vec) { uint16_t *v; v = (uint16_t *)&(*vec).V[0]; @@ -4906,53 +4326,57 @@ void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, V *(v + 1) = v7.V; *v = v8.V; } -void affinecomM16to128(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff16 aff5, Aff16 aff6, Aff16 aff7, Aff16 aff8, Aff128 *aff) // diagonal affine concatenation, 8 16*16 -> 128*128 +/* diagonal affine concatenation, 8 16*16 -> 128*128 */ +void affinecomM16to128(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, + Aff16 aff5, Aff16 aff6, Aff16 aff7, Aff16 aff8, + Aff128 *aff) { - MatrixcomM16to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, aff5.Mat, aff6.Mat, aff7.Mat, aff8.Mat, &(aff->Mat)); - VectorcomV16to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, aff6.Vec, aff7.Vec, aff8.Vec, &(aff->Vec)); + MatrixcomM16to128(aff1.Mat, aff2.Mat, aff3.Mat, aff4.Mat, 
aff5.Mat, + aff6.Mat, aff7.Mat, aff8.Mat, &(aff->Mat)); + VectorcomV16to128(aff1.Vec, aff2.Vec, aff3.Vec, aff4.Vec, aff5.Vec, + aff6.Vec, aff7.Vec, aff8.Vec, &(aff->Vec)); } -void MattransM4(M4 Mat, M4 *Mat_trans) // matrix tansposition M4 +void MattransM4(M4 Mat, M4 *Mat_trans) /* matrix tansposition M4 */ { int i, j; uint8_t mask[2], k, k2, l, temp; mask[0] = 0x5; mask[1] = 0x3; - for (j = 0; j < 2; j++) - { + for (j = 0; j < 2; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 2; i++) - { + for (i = 0; i < 2; i++) { l = (k2 * i) % 3; - temp = ((Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k)) & 0x0f; - Mat.M[l + k] = ((Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k)) & 0x0f; + temp = ((Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k)) + & 0x0f; + Mat.M[l + k] = ((Mat.M[l + k] & mask[j]) ^ + ((Mat.M[l] & mask[j]) << k)) & 0x0f; Mat.M[l] = temp; } } copyM4(Mat, Mat_trans); } -void MattransM8(M8 Mat, M8 *Mat_trans) // matrix tansposition M8 +void MattransM8(M8 Mat, M8 *Mat_trans) /* matrix tansposition M8 */ { int i, j; uint8_t mask[3], k, k2, l, temp; mask[0] = 0x55; mask[1] = 0x33; mask[2] = 0x0f; - for (j = 0; j < 3; j++) - { + for (j = 0; j < 3; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { l = (k2 * i) % 7; temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k); - Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k); + Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ + ((Mat.M[l] & mask[j]) << k); Mat.M[l] = temp; } } copyM8(Mat, Mat_trans); } -void MattransM16(M16 Mat, M16 *Mat_trans) // matrix tansposition M16 +void MattransM16(M16 Mat, M16 *Mat_trans) /* matrix tansposition M16 */ { int i, j; uint16_t mask[4], k, k2, l, temp; @@ -4960,21 +4384,20 @@ void MattransM16(M16 Mat, M16 *Mat_trans) // matrix tansposition M16 mask[1] = 0x3333; mask[2] = 0x0f0f; mask[3] = 0x00ff; - for (j = 0; j < 4; j++) - { + for (j = 0; j < 4; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 8; i++) 
- { + for (i = 0; i < 8; i++) { l = (k2 * i) % 15; temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k); - Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k); + Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ + ((Mat.M[l] & mask[j]) << k); Mat.M[l] = temp; } } copyM16(Mat, Mat_trans); } -void MattransM32(M32 Mat, M32 *Mat_trans) // matrix tansposition M32 +void MattransM32(M32 Mat, M32 *Mat_trans) /* matrix tansposition M32 */ { int i, j; uint32_t mask[5], k, k2, l, temp; @@ -4983,21 +4406,20 @@ void MattransM32(M32 Mat, M32 *Mat_trans) // matrix tansposition M32 mask[2] = 0x0f0f0f0f; mask[3] = 0x00ff00ff; mask[4] = 0x0000ffff; - for (j = 0; j < 5; j++) - { + for (j = 0; j < 5; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { l = (k2 * i) % 31; temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k); - Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k); + Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ + ((Mat.M[l] & mask[j]) << k); Mat.M[l] = temp; } } copyM32(Mat, Mat_trans); } -void MattransM64(M64 Mat, M64 *Mat_trans) // matrix tansposition M64 +void MattransM64(M64 Mat, M64 *Mat_trans) /* matrix tansposition M64 */ { int i, j; uint64_t mask[6], k, k2, l, temp; @@ -5007,21 +4429,20 @@ void MattransM64(M64 Mat, M64 *Mat_trans) // matrix tansposition M64 mask[3] = 0x00ff00ff00ff00ff; mask[4] = 0x0000ffff0000ffff; mask[5] = 0x00000000ffffffff; - for (j = 0; j < 6; j++) - { + for (j = 0; j < 6; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { l = (k2 * i) % 63; temp = (Mat.M[l] & ~mask[j]) ^ ((Mat.M[l + k] & ~mask[j]) >> k); - Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ ((Mat.M[l] & mask[j]) << k); + Mat.M[l + k] = (Mat.M[l + k] & mask[j]) ^ + ((Mat.M[l] & mask[j]) << k); Mat.M[l] = temp; } } copyM64(Mat, Mat_trans); } -void MattransM128(M128 Mat, M128 *Mat_trans) // matrix tansposition M128 +void MattransM128(M128 Mat, M128 *Mat_trans) /* 
matrix tansposition M128 */ { int i, j; uint64_t mask[6], k, k2, l, temp; @@ -5031,31 +4452,32 @@ void MattransM128(M128 Mat, M128 *Mat_trans) // matrix tansposition M128 mask[3] = 0x00ff00ff00ff00ff; mask[4] = 0x0000ffff0000ffff; mask[5] = 0x00000000ffffffff; - for (j = 0; j < 6; j++) - { + for (j = 0; j < 6; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { l = (k2 * i) % 127; - temp = (Mat.M[l][0] & ~mask[j]) ^ ((Mat.M[l + k][0] & ~mask[j]) >> k); - Mat.M[l + k][0] = (Mat.M[l + k][0] & mask[j]) ^ ((Mat.M[l][0] & mask[j]) << k); + temp = (Mat.M[l][0] & ~mask[j]) ^ + ((Mat.M[l + k][0] & ~mask[j]) >> k); + Mat.M[l + k][0] = (Mat.M[l + k][0] & mask[j]) ^ + ((Mat.M[l][0] & mask[j]) << k); Mat.M[l][0] = temp; - temp = (Mat.M[l][1] & ~mask[j]) ^ ((Mat.M[l + k][1] & ~mask[j]) >> k); - Mat.M[l + k][1] = (Mat.M[l + k][1] & mask[j]) ^ ((Mat.M[l][1] & mask[j]) << k); + temp = (Mat.M[l][1] & ~mask[j]) ^ + ((Mat.M[l + k][1] & ~mask[j]) >> k); + Mat.M[l + k][1] = (Mat.M[l + k][1] & mask[j]) ^ + ((Mat.M[l][1] & mask[j]) << k); Mat.M[l][1] = temp; } } - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { temp = Mat.M[i + 64][0]; Mat.M[i + 64][0] = Mat.M[i][1]; Mat.M[i][1] = temp; } copyM128(Mat, Mat_trans); } -void MattransM256(M256 Mat, M256 *Mat_trans) // matrix tansposition M128 +void MattransM256(M256 Mat, M256 *Mat_trans) /* matrix tansposition M128 */ { int i, j; uint64_t mask[6], k, k2, l, temp; @@ -5065,32 +4487,37 @@ void MattransM256(M256 Mat, M256 *Mat_trans) // matrix tansposition M128 mask[3] = 0x00ff00ff00ff00ff; mask[4] = 0x0000ffff0000ffff; mask[5] = 0x00000000ffffffff; - for (j = 0; j < 6; j++) - { + for (j = 0; j < 6; j++) { k = 1 << j; k2 = k * 2; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { l = (k2 * i) % 255; - temp = (Mat.M[l][0] & ~mask[j]) ^ ((Mat.M[l + k][0] & ~mask[j]) >> k); - Mat.M[l + k][0] = (Mat.M[l + k][0] & mask[j]) ^ ((Mat.M[l][0] & mask[j]) << k); + temp = (Mat.M[l][0] & ~mask[j]) 
^ + ((Mat.M[l + k][0] & ~mask[j]) >> k); + Mat.M[l + k][0] = (Mat.M[l + k][0] & mask[j]) ^ + ((Mat.M[l][0] & mask[j]) << k); Mat.M[l][0] = temp; - temp = (Mat.M[l][1] & ~mask[j]) ^ ((Mat.M[l + k][1] & ~mask[j]) >> k); - Mat.M[l + k][1] = (Mat.M[l + k][1] & mask[j]) ^ ((Mat.M[l][1] & mask[j]) << k); + temp = (Mat.M[l][1] & ~mask[j]) ^ + ((Mat.M[l + k][1] & ~mask[j]) >> k); + Mat.M[l + k][1] = (Mat.M[l + k][1] & mask[j]) ^ + ((Mat.M[l][1] & mask[j]) << k); Mat.M[l][1] = temp; - temp = (Mat.M[l][2] & ~mask[j]) ^ ((Mat.M[l + k][2] & ~mask[j]) >> k); - Mat.M[l + k][2] = (Mat.M[l + k][2] & mask[j]) ^ ((Mat.M[l][2] & mask[j]) << k); + temp = (Mat.M[l][2] & ~mask[j]) ^ + ((Mat.M[l + k][2] & ~mask[j]) >> k); + Mat.M[l + k][2] = (Mat.M[l + k][2] & mask[j]) ^ + ((Mat.M[l][2] & mask[j]) << k); Mat.M[l][2] = temp; - temp = (Mat.M[l][3] & ~mask[j]) ^ ((Mat.M[l + k][3] & ~mask[j]) >> k); - Mat.M[l + k][3] = (Mat.M[l + k][3] & mask[j]) ^ ((Mat.M[l][3] & mask[j]) << k); + temp = (Mat.M[l][3] & ~mask[j]) ^ + ((Mat.M[l + k][3] & ~mask[j]) >> k); + Mat.M[l + k][3] = (Mat.M[l + k][3] & mask[j]) ^ + ((Mat.M[l][3] & mask[j]) << k); Mat.M[l][3] = temp; } } - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { temp = Mat.M[i + 64][0]; Mat.M[i + 64][0] = Mat.M[i][1]; Mat.M[i][1] = temp; @@ -5107,8 +4534,7 @@ void MattransM256(M256 Mat, M256 *Mat_trans) // matrix tansposition M128 Mat.M[i + 192][2] = Mat.M[i + 128][3]; Mat.M[i + 128][3] = temp; } - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { temp = Mat.M[i + 128][0]; Mat.M[i + 128][0] = Mat.M[i][2]; Mat.M[i][2] = temp; @@ -5122,155 +4548,141 @@ void MattransM256(M256 Mat, M256 *Mat_trans) // matrix tansposition M128 void MatAddMatM4(M4 Mat1, M4 Mat2, M4 *Mat) { int i; - for (i = 0; i < 4; i++) - { + for (i = 0; i < 4; i++) { (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; } } void MatAddMatM8(M8 Mat1, M8 Mat2, M8 *Mat) { int i; - for (i = 0; i < 8; i++) - { + for (i = 0; i < 8; i++) { (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; } } void 
MatAddMatM16(M16 Mat1, M16 Mat2, M16 *Mat) { int i; - for (i = 0; i < 16; i++) - { + for (i = 0; i < 16; i++) { (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; } } void MatAddMatM32(M32 Mat1, M32 Mat2, M32 *Mat) { int i; - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; } } void MatAddMatM64(M64 Mat1, M64 Mat2, M64 *Mat) { int i; - for (i = 0; i < 64; i++) - { + for (i = 0; i < 64; i++) { (*Mat).M[i] = Mat1.M[i] ^ Mat2.M[i]; } } void MatAddMatM128(M128 Mat1, M128 Mat2, M128 *Mat) { int i; - for (i = 0; i < 128; i++) - { + for (i = 0; i < 128; i++) { (*Mat).M[i][0] = Mat1.M[i][0] ^ Mat2.M[i][0]; (*Mat).M[i][1] = Mat1.M[i][1] ^ Mat2.M[i][1]; } } void MatAddMatM256(M256 Mat1, M256 Mat2, M256 *Mat) { - for (int i = 0; i < 256; i++) - { + for (int i = 0; i < 256; i++) { (*Mat).M[i][0] = Mat1.M[i][0] ^ Mat2.M[i][0]; (*Mat).M[i][1] = Mat1.M[i][1] ^ Mat2.M[i][1]; (*Mat).M[i][2] = Mat1.M[i][2] ^ Mat2.M[i][2]; (*Mat).M[i][3] = Mat1.M[i][3] ^ Mat2.M[i][3]; } } -void MatMulMatM4(M4 Mat1, M4 Mat2, M4 *Mat) // matrix multiplication 4*4 mul 4*4 -> 4*4 +/* matrix multiplication 4*4 mul 4*4 -> 4*4 */ +void MatMulMatM4(M4 Mat1, M4 Mat2, M4 *Mat) { int i, j; M4 Mat2_trans; initM4(Mat); MattransM4(Mat2, &Mat2_trans); - for (i = 0; i < 4; i++) - { - for (j = 0; j < 4; j++) - { + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { if (xorU4(Mat1.M[i] & Mat2_trans.M[j] & 0x0f)) (*Mat).M[i] ^= idM4[j]; } } } -void MatMulMatM8(M8 Mat1, M8 Mat2, M8 *Mat) // matrix multiplication 8*8 mul 8*8 -> 8*8 +/* matrix multiplication 8*8 mul 8*8 -> 8*8 */ +void MatMulMatM8(M8 Mat1, M8 Mat2, M8 *Mat) { int i, j; M8 Mat2_trans; initM8(Mat); MattransM8(Mat2, &Mat2_trans); - for (i = 0; i < 8; i++) - { - for (j = 0; j < 8; j++) - { + for (i = 0; i < 8; i++) { + for (j = 0; j < 8; j++) { if (xorU8(Mat1.M[i] & Mat2_trans.M[j])) (*Mat).M[i] ^= idM8[j]; } } } -void MatMulMatM16(M16 Mat1, M16 Mat2, M16 *Mat) // matrix multiplication 16*16 mul 16*16 -> 16*16 +/* matrix 
multiplication 16*16 mul 16*16 -> 16*16 */ +void MatMulMatM16(M16 Mat1, M16 Mat2, M16 *Mat) { int i, j; M16 Mat2_trans; initM16(Mat); MattransM16(Mat2, &Mat2_trans); - for (i = 0; i < 16; i++) - { - for (j = 0; j < 16; j++) - { + for (i = 0; i < 16; i++) { + for (j = 0; j < 16; j++) { if (xorU16(Mat1.M[i] & Mat2_trans.M[j])) (*Mat).M[i] ^= idM16[j]; } } } -void MatMulMatM32(M32 Mat1, M32 Mat2, M32 *Mat) // matrix multiplication 32*32 mul 32*32 -> 32*32 +/* matrix multiplication 32*32 mul 32*32 -> 32*32 */ +void MatMulMatM32(M32 Mat1, M32 Mat2, M32 *Mat) { int i, j; M32 Mat2_trans; initM32(Mat); MattransM32(Mat2, &Mat2_trans); - for (i = 0; i < 32; i++) - { - for (j = 0; j < 32; j++) - { + for (i = 0; i < 32; i++) { + for (j = 0; j < 32; j++) { if (xorU32(Mat1.M[i] & Mat2_trans.M[j])) (*Mat).M[i] ^= idM32[j]; } } } -void MatMulMatM64(M64 Mat1, M64 Mat2, M64 *Mat) // matrix multiplication 64*64 mul 64*64 -> 64*64 +/* matrix multiplication 64*64 mul 64*64 -> 64*64 */ +void MatMulMatM64(M64 Mat1, M64 Mat2, M64 *Mat) { int i, j; M64 Mat2_trans; initM64(Mat); MattransM64(Mat2, &Mat2_trans); - for (i = 0; i < 64; i++) - { - for (j = 0; j < 64; j++) - { + for (i = 0; i < 64; i++) { + for (j = 0; j < 64; j++) { if (xorU64(Mat1.M[i] & Mat2_trans.M[j])) (*Mat).M[i] ^= idM64[j]; } } } -void MatMulMatM128(M128 Mat1, M128 Mat2, M128 *Mat) // matrix multiplication 128*128 mul 128*128 -> 128*128 +/* matrix multiplication 128*128 mul 128*128 -> 128*128 */ +void MatMulMatM128(M128 Mat1, M128 Mat2, M128 *Mat) { int i, j; M128 Mat2_trans; uint64_t temp[2]; initM128(Mat); MattransM128(Mat2, &Mat2_trans); - for (i = 0; i < 128; i++) - { - for (j = 0; j < 64; j++) - { + for (i = 0; i < 128; i++) { + for (j = 0; j < 64; j++) { temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; if (xorU128(temp)) (*Mat).M[i][0] ^= idM64[j]; } - for (j = 64; j < 128; j++) - { + for (j = 64; j < 128; j++) { temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; temp[1] = 
Mat1.M[i][1] & Mat2_trans.M[j][1]; if (xorU128(temp)) @@ -5278,17 +4690,16 @@ void MatMulMatM128(M128 Mat1, M128 Mat2, M128 *Mat) // matrix multiplication 128 } } } -void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) // matrix multiplication 256*256 mul 256*256 -> 256*256 +/* matrix multiplication 256*256 mul 256*256 -> 256*256 */ +void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) { int i, j; M256 Mat2_trans; uint64_t temp[4]; initM256(Mat); MattransM256(Mat2, &Mat2_trans); - for (i = 0; i < 256; i++) - { - for (j = 0; j < 64; j++) - { + for (i = 0; i < 256; i++) { + for (j = 0; j < 64; j++) { temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; @@ -5296,8 +4707,7 @@ void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) // matrix multiplication 256 if (xorU256(temp)) (*Mat).M[i][0] ^= idM64[j]; } - for (j = 64; j < 128; j++) - { + for (j = 64; j < 128; j++) { temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; @@ -5305,8 +4715,7 @@ void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) // matrix multiplication 256 if (xorU256(temp)) (*Mat).M[i][1] ^= idM64[j - 64]; } - for (j = 128; j < 192; j++) - { + for (j = 128; j < 192; j++) { temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; @@ -5314,8 +4723,7 @@ void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) // matrix multiplication 256 if (xorU256(temp)) (*Mat).M[i][2] ^= idM64[j - 128]; } - for (j = 192; j < 256; j++) - { + for (j = 192; j < 256; j++) { temp[0] = Mat1.M[i][0] & Mat2_trans.M[j][0]; temp[1] = Mat1.M[i][1] & Mat2_trans.M[j][1]; temp[2] = Mat1.M[i][2] & Mat2_trans.M[j][2]; @@ -5325,40 +4733,46 @@ void MatMulMatM256(M256 Mat1, M256 Mat2, M256 *Mat) // matrix multiplication 256 } } } -void affinemixM4(Aff4 aff, Aff4 preaff_inv, Aff4 *mixaff) // mixed 
transformation of (previous affine inversion) and this round affine +/* mixed transformation of (previous affine inversion) and this round affine */ +void affinemixM4(Aff4 aff, Aff4 preaff_inv, Aff4 *mixaff) { MatMulMatM4(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); MatMulVecM4(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); (*mixaff).Vec.V ^= aff.Vec.V; } -void affinemixM8(Aff8 aff, Aff8 preaff_inv, Aff8 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +/* mixed transformation of (previous affine inversion) and this round affine */ +void affinemixM8(Aff8 aff, Aff8 preaff_inv, Aff8 *mixaff) { MatMulMatM8(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); MatMulVecM8(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); (*mixaff).Vec.V ^= aff.Vec.V; } -void affinemixM16(Aff16 aff, Aff16 preaff_inv, Aff16 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +/* mixed transformation of (previous affine inversion) and this round affine */ +void affinemixM16(Aff16 aff, Aff16 preaff_inv, Aff16 *mixaff) { MatMulMatM16(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); MatMulVecM16(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); (*mixaff).Vec.V ^= aff.Vec.V; } -void affinemixM32(Aff32 aff, Aff32 preaff_inv, Aff32 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +/* mixed transformation of (previous affine inversion) and this round affine */ +void affinemixM32(Aff32 aff, Aff32 preaff_inv, Aff32 *mixaff) { MatMulMatM32(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); MatMulVecM32(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); (*mixaff).Vec.V ^= aff.Vec.V; } -void affinemixM64(Aff64 aff, Aff64 preaff_inv, Aff64 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +/* mixed transformation of (previous affine inversion) and this round affine */ +void affinemixM64(Aff64 aff, Aff64 preaff_inv, Aff64 *mixaff) { MatMulMatM64(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); MatMulVecM64(aff.Mat, 
preaff_inv.Vec, &(mixaff->Vec)); (*mixaff).Vec.V ^= aff.Vec.V; } -void affinemixM128(Aff128 aff, Aff128 preaff_inv, Aff128 *mixaff) // mixed transformation of (previous affine inversion) and this round affine +/* mixed transformation of (previous affine inversion) and this round affine */ +void affinemixM128(Aff128 aff, Aff128 preaff_inv, Aff128 *mixaff) { MatMulMatM128(aff.Mat, preaff_inv.Mat, &(mixaff->Mat)); MatMulVecM128(aff.Mat, preaff_inv.Vec, &(mixaff->Vec)); (*mixaff).Vec.V[0] ^= aff.Vec.V[0]; (*mixaff).Vec.V[1] ^= aff.Vec.V[1]; -} \ No newline at end of file +} diff --git a/crypto/sm4/wb/WBMatrix.h b/crypto/sm4/wb/WBMatrix.h index 377661913..092a3bf8a 100644 --- a/crypto/sm4/wb/WBMatrix.h +++ b/crypto/sm4/wb/WBMatrix.h @@ -218,10 +218,14 @@ void affinemixM64(Aff64 aff, Aff64 preaff_inv, Aff64 *mixaff); void MatrixcomM16to64(M16 m1, M16 m2, M16 m3, M16 m4, M64 *mat); void VectorcomV16to64(V16 v1, V16 v2, V16 v3, V16 v4, V64 *vec); -void affinecomM16to64(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff64 *aff); -void MatrixcomM8to64(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M64 *mat); -void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V64 *vec); -void affinecomM8to64(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff64 *aff); +void affinecomM16to64(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, + Aff64 *aff); +void MatrixcomM8to64(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, + M64 *mat); +void VectorcomV8to64(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, + V64 *vec); +void affinecomM8to64(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, + Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff64 *aff); /* * 128-bit Matrix operation @@ -262,13 +266,25 @@ void affinemixM128(Aff128 aff, Aff128 preaff_inv, Aff128 *mixaff); void MatrixcomM32to128(M32 m1, M32 m2, M32 m3, M32 m4, M128 *mat); void VectorcomV32to128(V32 v1, V32 v2, V32 v3, V32 v4, V128 *vec); -void 
affinecomM32to128(Aff32 aff1, Aff32 aff2, Aff32 aff3, Aff32 aff4, Aff128 *aff); -void MatrixcomM8to128(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, M8 m9, M8 m10, M8 m11, M8 m12, M8 m13, M8 m14, M8 m15, M8 m16, M128 *mat); -void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, V8 v9, V8 v10, V8 v11, V8 v12, V8 v13, V8 v14, V8 v15, V8 v16, V128 *vec); -void affinecomM8to128(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff8 aff9, Aff8 aff10, Aff8 aff11, Aff8 aff12, Aff8 aff13, Aff8 aff14, Aff8 aff15, Aff8 aff16, Aff128 *aff); -void MatrixcomM16to128(M16 m1, M16 m2, M16 m3, M16 m4, M16 m5, M16 m6, M16 m7, M16 m8, M128 *mat); -void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, V16 v8, V128 *vec); -void affinecomM16to128(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, Aff16 aff5, Aff16 aff6, Aff16 aff7, Aff16 aff8, Aff128 *aff); +void affinecomM32to128(Aff32 aff1, Aff32 aff2, Aff32 aff3, Aff32 aff4, + Aff128 *aff); +void MatrixcomM8to128(M8 m1, M8 m2, M8 m3, M8 m4, M8 m5, M8 m6, M8 m7, M8 m8, + M8 m9, M8 m10, M8 m11, M8 m12, M8 m13, M8 m14, M8 m15, + M8 m16, M128 *mat); +void VectorcomV8to128(V8 v1, V8 v2, V8 v3, V8 v4, V8 v5, V8 v6, V8 v7, V8 v8, + V8 v9, V8 v10, V8 v11, V8 v12, V8 v13, V8 v14, V8 v15, + V8 v16, V128 *vec); +void affinecomM8to128(Aff8 aff1, Aff8 aff2, Aff8 aff3, Aff8 aff4, Aff8 aff5, + Aff8 aff6, Aff8 aff7, Aff8 aff8, Aff8 aff9, Aff8 aff10, + Aff8 aff11, Aff8 aff12, Aff8 aff13, Aff8 aff14, + Aff8 aff15, Aff8 aff16, Aff128 *aff); +void MatrixcomM16to128(M16 m1, M16 m2, M16 m3, M16 m4, M16 m5, M16 m6, M16 m7, + M16 m8, M128 *mat); +void VectorcomV16to128(V16 v1, V16 v2, V16 v3, V16 v4, V16 v5, V16 v6, V16 v7, + V16 v8, V128 *vec); +void affinecomM16to128(Aff16 aff1, Aff16 aff2, Aff16 aff3, Aff16 aff4, + Aff16 aff5, Aff16 aff6, Aff16 aff7, Aff16 aff8, + Aff128 *aff); /* * 256-bit Matrix operation @@ -311,4 +327,4 @@ void affinemixM256(Aff256 aff, 
Aff256 preaff_inv, Aff256 *mixaff); } #endif -#endif \ No newline at end of file +#endif diff --git a/crypto/sm4/wb/WBRandom.h b/crypto/sm4/wb/WBRandom.h index 82f4398e5..30b371849 100644 --- a/crypto/sm4/wb/WBRandom.h +++ b/crypto/sm4/wb/WBRandom.h @@ -22,4 +22,4 @@ static ossl_inline unsigned int cus_random() return ret; } -#endif // _WBRANDOM_H_ \ No newline at end of file +#endif /* _WBRANDOM_H_ */ diff --git a/crypto/sm4/wb/WSISE-wbsm4.c b/crypto/sm4/wb/WSISE-wbsm4.c index fe0860db7..7f777cd75 100644 --- a/crypto/sm4/wb/WSISE-wbsm4.c +++ b/crypto/sm4/wb/WSISE-wbsm4.c @@ -294,8 +294,7 @@ void wbsm4_wsise_set_key(const uint8_t *key, wbsm4_wsise_key *wbsm4_key) uint8_t *p = (uint8_t *)wbsm4_key; uint8_t *table = (uint8_t *)&wbsm4_key->Table; uint8_t *end = p + sizeof(wbsm4_wsise_key); - while (p < table) - { + while (p < table) { uint8_t t; t = p[0]; p[0] = p[3]; @@ -309,8 +308,7 @@ void wbsm4_wsise_set_key(const uint8_t *key, wbsm4_wsise_key *wbsm4_key) } p = table; - while (p < end) - { + while (p < end) { uint8_t t; t = p[0]; p[0] = p[7]; @@ -344,8 +342,7 @@ void wbsm4_wsise_export_key(const wbsm4_wsise_key *wbsm4_key, uint8_t *key) uint8_t *p = (uint8_t *)out; uint8_t *table = (uint8_t *)&out->Table; uint8_t *end = p + sizeof(wbsm4_wsise_key); - while (p < table) - { + while (p < table) { uint8_t t; t = p[0]; p[0] = p[3]; @@ -359,8 +356,7 @@ void wbsm4_wsise_export_key(const wbsm4_wsise_key *wbsm4_key, uint8_t *key) } p = table; - while (p < end) - { + while (p < end) { uint8_t t; t = p[0]; p[0] = p[7]; @@ -402,34 +398,34 @@ void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key) uint32_t SK[32]; wbsm4_sm4_setkey(SK, sm4_key); - for (i = 0; i < 36; i++) - { - // affine P + for (i = 0; i < 36; i++) { + /* affine P */ genaffinepairM32(&P[i], &P_inv[i]); } - for (i = 0; i < 32; i++) - { - // affine E - for (j = 0; j < 4; j++) - { + for (i = 0; i < 32; i++) { + /* affine E */ + for (j = 0; j < 4; j++) { genaffinepairM8(&Eij[i][j], 
&Eij_inv[i][j]); genaffinepairM8(&Qij[i][j], &Qij_inv[i][j]); } - // combine 4 E8 to 1 E32 - affinecomM8to32(Eij_inv[i][0], Eij_inv[i][1], Eij_inv[i][2], Eij_inv[i][3], &Ei_inv[i]); + /* combine 4 E8 to 1 E32 */ + affinecomM8to32(Eij_inv[i][0], Eij_inv[i][1], Eij_inv[i][2], + Eij_inv[i][3], &Ei_inv[i]); - // affine M + /* affine M */ affinemixM32(Ei_inv[i], P_inv[i + 1], &wbsm4_key->M[i][0]); affinemixM32(Ei_inv[i], P_inv[i + 2], &wbsm4_key->M[i][1]); affinemixM32(Ei_inv[i], P_inv[i + 3], &wbsm4_key->M[i][2]); - // affine Q - affinecomM8to64(Qij[i][0], Qij[i][1], Qij[i][2], Qij[i][3], Qij[i][0], Qij[i][1], Qij[i][2], Qij[i][3], &Q[i]); - affinecomM8to32(Qij_inv[i][0], Qij_inv[i][1], Qij_inv[i][2], Qij_inv[i][3], &Q_inv[i]); + /* affine Q */ + affinecomM8to64(Qij[i][0], Qij[i][1], Qij[i][2], Qij[i][3], Qij[i][0], + Qij[i][1], Qij[i][2], Qij[i][3], &Q[i]); + affinecomM8to32(Qij_inv[i][0], Qij_inv[i][1], Qij_inv[i][2], + Qij_inv[i][3], &Q_inv[i]); - // affine C D, C for Xi0, D for T(Xi1+Xi2+Xi3+rk) + /* affine C D, C for Xi0, D for T(Xi1+Xi2+Xi3+rk) */ affinemixM32(P[i + 4], P_inv[i], &wbsm4_key->C[i]); affinemixM32(P[i + 4], Q_inv[i], &wbsm4_key->D[i]); temp_u32 = cus_random(); @@ -437,31 +433,28 @@ void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key) wbsm4_key->D[i].Vec.V ^= P[i + 4].Vec.V ^ temp_u32; } - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { V64 Q_constant[3]; - for (j = 0; j < 3; j++) - { + for (j = 0; j < 3; j++) { randV64(&Q_constant[j]); } uint8_t randnum[4]; V8 randvec[4]; - for (j = 0; j < 4; j++) - { + for (j = 0; j < 4; j++) { randnum[j] = cus_random() % 2; randV8(&randvec[j]); } - for (x = 0; x < 256; x++) - { - for (j = 0; j < 4; j++) - { + for (x = 0; x < 256; x++) { + for (j = 0; j < 4; j++) { temp_u8 = affineU8(Eij[i][j], x); if (randnum[j] == 0) - temp_u16 = (SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)] << 8) | SBOX[temp_u8 ^ randvec[j].V]; + temp_u16 = (SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)] + << 
8) | SBOX[temp_u8 ^ randvec[j].V]; else - temp_u16 = (SBOX[temp_u8 ^ randvec[j].V] << 8) | SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)]; + temp_u16 = (SBOX[temp_u8 ^ randvec[j].V] << 8) | + SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)]; temp_u64 = ((uint64_t)temp_u16) << (48 - 16 * j); temp_u64 = MatMulNumM64(L_matrix, temp_u64); if (randnum[j] == 0) @@ -470,17 +463,16 @@ void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key) temp_u64 = MatMulNumM64(SR1, temp_u64); wbsm4_key->Table[i][j][x] = MatMulNumM64(Q[i].Mat, temp_u64); } - for (j = 0; j < 3; j++) - { + for (j = 0; j < 3; j++) { wbsm4_key->Table[i][j][x] ^= Q_constant[j].V; } - wbsm4_key->Table[i][3][x] ^= Q[i].Vec.V ^ Q_constant[0].V ^ Q_constant[1].V ^ Q_constant[2].V; + wbsm4_key->Table[i][3][x] ^= Q[i].Vec.V ^ Q_constant[0].V ^ + Q_constant[1].V ^ Q_constant[2].V; } } - // external encoding - for (i = 0; i < 4; i++) - { + /* external encoding */ + for (i = 0; i < 4; i++) { wbsm4_key->SE[i].Mat = P[i].Mat; wbsm4_key->SE[i].Vec = P[i].Vec; @@ -489,7 +481,8 @@ void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key) } } -void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_wsise_key *wbsm4_key) +void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], + const wbsm4_wsise_key *wbsm4_key) { int i; uint32_t x0, x1, x2, x3, x4; @@ -506,13 +499,15 @@ void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], const wb x2 = affineU32(wbsm4_key->SE[2], x2); x3 = affineU32(wbsm4_key->SE[3], x3); - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { xt1 = affineU32(wbsm4_key->M[i][0], x1); xt2 = affineU32(wbsm4_key->M[i][1], x2); xt3 = affineU32(wbsm4_key->M[i][2], x3); x4 = xt1 ^ xt2 ^ xt3; - xx4 = wbsm4_key->Table[i][0][(x4 >> 24) & 0xff] ^ wbsm4_key->Table[i][1][(x4 >> 16) & 0xff] ^ wbsm4_key->Table[i][2][(x4 >> 8) & 0xff] ^ wbsm4_key->Table[i][3][x4 & 0xff]; + xx4 = wbsm4_key->Table[i][0][(x4 >> 24) & 0xff] ^ 
+ wbsm4_key->Table[i][1][(x4 >> 16) & 0xff] ^ + wbsm4_key->Table[i][2][(x4 >> 8) & 0xff] ^ + wbsm4_key->Table[i][3][x4 & 0xff]; x4 = xx4 >> 32; xt0 = affineU32(wbsm4_key->C[i], x0); xt4 = affineU32(wbsm4_key->D[i], x4); @@ -533,4 +528,4 @@ void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], const wb PUT32(x2, OUT + 4); PUT32(x1, OUT + 8); PUT32(x0, OUT + 12); -} \ No newline at end of file +} diff --git a/crypto/sm4/wb/Xiao-Lai-wbsm4.c b/crypto/sm4/wb/Xiao-Lai-wbsm4.c index bea121d8f..f3cc7acb1 100644 --- a/crypto/sm4/wb/Xiao-Lai-wbsm4.c +++ b/crypto/sm4/wb/Xiao-Lai-wbsm4.c @@ -26,7 +26,7 @@ (ct)[2] = (uint8_t)((st) >> 8); \ (ct)[3] = (uint8_t)(st) -static uint8_t SBOX[256]={ +static uint8_t SBOX[256] = { 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, @@ -93,7 +93,8 @@ static M32 L_matrix = { .M[28] = 0x202080A, .M[29] = 0x1010405, .M[30] = 0x80808202, - .M[31] = 0x40404101}; + .M[31] = 0x40404101 +}; void wbsm4_xiaolai_set_key(const uint8_t *key, wbsm4_xiaolai_key *wbsm4_key) { @@ -105,8 +106,7 @@ void wbsm4_xiaolai_set_key(const uint8_t *key, wbsm4_xiaolai_key *wbsm4_key) uint8_t *p = (uint8_t *)wbsm4_key; uint8_t *end = p + sizeof(wbsm4_xiaolai_key); - while (p < end) - { + while (p < end) { uint8_t t; t = p[0]; p[0] = p[3]; @@ -131,8 +131,7 @@ void wbsm4_xiaolai_export_key(const wbsm4_xiaolai_key *wbsm4_key, uint8_t *key) uint8_t *p = (uint8_t *)out; uint8_t *end = p + sizeof(wbsm4_xiaolai_key); - while (p < end) - { + while (p < end) { uint8_t t; t = p[0]; p[0] = p[3]; @@ -160,32 +159,30 @@ void wbsm4_xiaolai_gen(const uint8_t *sm4_key, wbsm4_xiaolai_key *wbsm4_key) uint32_t SK[32]; wbsm4_sm4_setkey(SK, sm4_key); - for (i = 0; i < 36; i++) - { - // affine P + for (i = 0; i < 36; i++) { + /* affine P */ genaffinepairM32(&P[i], &P_inv[i]); } - for (i = 0; i < 32; i++) - { - // affine E - for (j = 0; j < 4; j++) - { + for (i = 0; i < 32; i++) { + 
/* affine E */ + for (j = 0; j < 4; j++) { genaffinepairM8(&Eij[i][j], &Eij_inv[i][j]); } - // combine 4 E8 to 1 E32 - affinecomM8to32(Eij_inv[i][0], Eij_inv[i][1], Eij_inv[i][2], Eij_inv[i][3], &Ei_inv[i]); + /* combine 4 E8 to 1 E32 */ + affinecomM8to32(Eij_inv[i][0], Eij_inv[i][1], Eij_inv[i][2], + Eij_inv[i][3], &Ei_inv[i]); - // affine M + /* affine M */ affinemixM32(Ei_inv[i], P_inv[i + 1], &wbsm4_key->M[i][0]); affinemixM32(Ei_inv[i], P_inv[i + 2], &wbsm4_key->M[i][1]); affinemixM32(Ei_inv[i], P_inv[i + 3], &wbsm4_key->M[i][2]); - // affine Q + /* affine Q */ genaffinepairM32(&Q[i], &Q_inv[i]); - // affine C D, C for Xi0, D for T(Xi1+Xi2+Xi3+rk) + /* affine C D, C for Xi0, D for T(Xi1+Xi2+Xi3+rk) */ affinemixM32(P[i + 4], P_inv[i], &wbsm4_key->C[i]); affinemixM32(P[i + 4], Q_inv[i], &wbsm4_key->D[i]); uint32_t temp_u32 = cus_random(); @@ -193,38 +190,33 @@ void wbsm4_xiaolai_gen(const uint8_t *sm4_key, wbsm4_xiaolai_key *wbsm4_key) wbsm4_key->D[i].Vec.V ^= P[i + 4].Vec.V ^ temp_u32; } - for (i = 0; i < 32; i++) - { - // combine QL + for (i = 0; i < 32; i++) { + /* combine QL */ M32 QL; MatMulMatM32(Q[i].Mat, L_matrix, &QL); uint32_t Q_constant[3] = {0}; - for (j = 0; j < 3; j++) - { + for (j = 0; j < 3; j++) { Q_constant[j] = cus_random(); } - for (x = 0; x < 256; x++) - { - for (j = 0; j < 4; j++) - { + for (x = 0; x < 256; x++) { + for (j = 0; j < 4; j++) { uint8_t temp_u8 = affineU8(Eij[i][j], x); temp_u8 = SBOX[temp_u8 ^ ((SK[i] >> (24 - j * 8)) & 0xff)]; uint32_t temp_32 = temp_u8 << (24 - j * 8); wbsm4_key->Table[i][j][x] = MatMulNumM32(QL, temp_32); } - for (j = 0; j < 3; j++) - { + for (j = 0; j < 3; j++) { wbsm4_key->Table[i][j][x] ^= Q_constant[j]; } - wbsm4_key->Table[i][3][x] ^= Q[i].Vec.V ^ Q_constant[0] ^ Q_constant[1] ^ Q_constant[2]; + wbsm4_key->Table[i][3][x] ^= Q[i].Vec.V ^ Q_constant[0] ^ + Q_constant[1] ^ Q_constant[2]; } } - // external encoding - for (i = 0; i < 4; i++) - { + /* external encoding */ + for (i = 0; i < 4; i++) { 
wbsm4_key->SE[i].Mat = P[i].Mat; wbsm4_key->SE[i].Vec = P[i].Vec; @@ -233,7 +225,8 @@ void wbsm4_xiaolai_gen(const uint8_t *sm4_key, wbsm4_xiaolai_key *wbsm4_key) } } -void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_xiaolai_key *wbsm4_key) +void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], + const wbsm4_xiaolai_key *wbsm4_key) { int i; uint32_t x0, x1, x2, x3, x4; @@ -248,13 +241,15 @@ void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], const x2 = affineU32(wbsm4_key->SE[2], x2); x3 = affineU32(wbsm4_key->SE[3], x3); - for (i = 0; i < 32; i++) - { + for (i = 0; i < 32; i++) { xt1 = affineU32(wbsm4_key->M[i][0], x1); xt2 = affineU32(wbsm4_key->M[i][1], x2); xt3 = affineU32(wbsm4_key->M[i][2], x3); x4 = xt1 ^ xt2 ^ xt3; - x4 = wbsm4_key->Table[i][0][(x4 >> 24) & 0xff] ^ wbsm4_key->Table[i][1][(x4 >> 16) & 0xff] ^ wbsm4_key->Table[i][2][(x4 >> 8) & 0xff] ^ wbsm4_key->Table[i][3][x4 & 0xff]; + x4 = wbsm4_key->Table[i][0][(x4 >> 24) & 0xff] ^ + wbsm4_key->Table[i][1][(x4 >> 16) & 0xff] ^ + wbsm4_key->Table[i][2][(x4 >> 8) & 0xff] ^ + wbsm4_key->Table[i][3][x4 & 0xff]; xt0 = affineU32(wbsm4_key->C[i], x0); xt4 = affineU32(wbsm4_key->D[i], x4); x4 = xt0 ^ xt4; @@ -273,4 +268,4 @@ void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], const PUT32(x2, OUT + 4); PUT32(x1, OUT + 8); PUT32(x0, OUT + 12); -} \ No newline at end of file +} diff --git a/crypto/sm4/wb/wbsm4.c b/crypto/sm4/wb/wbsm4.c index 7d0a6aa3c..6fc6da006 100644 --- a/crypto/sm4/wb/wbsm4.c +++ b/crypto/sm4/wb/wbsm4.c @@ -46,10 +46,12 @@ static const uint8_t SM4_S[256] = { 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, 0x89, 0x69, 0x97, 0x4A, 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84, 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, - 0xD7, 0xCB, 0x39, 0x48}; + 0xD7, 0xCB, 0x39, 0x48 +}; static const uint32_t FK[4] = { - 0xa3b1bac6, 0x56aa3350, 0x677d9197, 
0xb27022dc}; + 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc +}; static const uint32_t CK[32] = { 0x00070E15, 0x1C232A31, 0x383F464D, 0x545B6269, @@ -59,7 +61,8 @@ static const uint32_t CK[32] = { 0xC0C7CED5, 0xDCE3EAF1, 0xF8FF060D, 0x141B2229, 0x30373E45, 0x4C535A61, 0x686F767D, 0x848B9299, 0xA0A7AEB5, 0xBCC3CAD1, 0xD8DFE6ED, 0xF4FB0209, - 0x10171E25, 0x2C333A41, 0x484F565D, 0x646B7279}; + 0x10171E25, 0x2C333A41, 0x484F565D, 0x646B7279 +}; static ossl_inline uint32_t rotl(uint32_t a, uint8_t n) { @@ -75,8 +78,7 @@ void wbsm4_sm4_setkey(uint32_t SK[32], const uint8_t key[16]) K[2] = GET32(key + 8) ^ FK[2]; K[3] = GET32(key + 12) ^ FK[3]; - for (uint32_t i = 0; i != 32; ++i) - { + for (uint32_t i = 0; i != 32; ++i) { uint32_t X = K[(i + 1)] ^ K[(i + 2)] ^ K[(i + 3)] ^ CK[i]; uint32_t t = 0; @@ -89,4 +91,4 @@ void wbsm4_sm4_setkey(uint32_t SK[32], const uint8_t key[16]) K[i + 4] = K[i] ^ t; SK[i] = K[i + 4]; } -} \ No newline at end of file +} diff --git a/include/crypto/wbsm4.h b/include/crypto/wbsm4.h index 8e2246a50..885ac45e7 100644 --- a/include/crypto/wbsm4.h +++ b/include/crypto/wbsm4.h @@ -23,8 +23,7 @@ void wbsm4_sm4_setkey(uint32_t SK[32], const uint8_t key[16]); #pragma pack(push, 1) -typedef struct -{ +typedef struct { Aff32 M[32][3]; Aff32 C[32]; Aff32 D[32]; @@ -35,13 +34,14 @@ typedef struct #pragma pack(pop) void wbsm4_xiaolai_gen(const uint8_t *sm4_key, wbsm4_xiaolai_key *wbsm4_key); -void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_xiaolai_key *wbsm4_key); +void wbsm4_xiaolai_encrypt(const unsigned char IN[], unsigned char OUT[], + const wbsm4_xiaolai_key *wbsm4_key); void wbsm4_xiaolai_set_key(const uint8_t *key, wbsm4_xiaolai_key *wbsm4_key); -void wbsm4_xiaolai_export_key(const wbsm4_xiaolai_key *wbsm4_key, uint8_t *key); +void wbsm4_xiaolai_export_key(const wbsm4_xiaolai_key *wbsm4_key, + uint8_t *key); #pragma pack(push, 1) -typedef struct -{ +typedef struct { Aff32 SE[4]; Aff32 FE[4]; uint32_t 
TD[32][4][4][256]; @@ -50,13 +50,13 @@ typedef struct #pragma pack(pop) void wbsm4_baiwu_gen(const uint8_t *sm4_key, wbsm4_baiwu_key *wbsm4_key); -void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_baiwu_key *wbsm4_key); +void wbsm4_baiwu_encrypt(const unsigned char IN[], unsigned char OUT[], + const wbsm4_baiwu_key *wbsm4_key); void wbsm4_baiwu_set_key(const uint8_t *key, wbsm4_baiwu_key *wbsm4_key); void wbsm4_baiwu_export_key(const wbsm4_baiwu_key *wbsm4_key, uint8_t *key); #pragma pack(push, 1) -typedef struct -{ +typedef struct { Aff32 M[32][3]; Aff32 C[32]; Aff32 D[32]; @@ -67,8 +67,9 @@ typedef struct #pragma pack(pop) void wbsm4_wsise_gen(const uint8_t *sm4_key, wbsm4_wsise_key *wbsm4_key); -void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], const wbsm4_wsise_key *wbsm4_key); +void wbsm4_wsise_encrypt(const unsigned char IN[], unsigned char OUT[], + const wbsm4_wsise_key *wbsm4_key); void wbsm4_wsise_set_key(const uint8_t *key, wbsm4_wsise_key *wbsm4_key); void wbsm4_wsise_export_key(const wbsm4_wsise_key *wbsm4_key, uint8_t *key); -#endif // _WBSM4_H_ \ No newline at end of file +#endif /* _WBSM4_H_ */ diff --git a/include/crypto/wbstructure.h b/include/crypto/wbstructure.h index 3aad54091..b8fa09d8b 100644 --- a/include/crypto/wbstructure.h +++ b/include/crypto/wbstructure.h @@ -14,123 +14,102 @@ #include -// 4 bits -typedef struct M4 -{ +/* 4 bits */ +typedef struct M4 { uint8_t M[4]; } M4; -typedef struct V4 -{ +typedef struct V4 { uint8_t V; } V4; -typedef struct Aff4 -{ +typedef struct Aff4 { M4 Mat; V4 Vec; } Aff4; -// 8 bits -typedef struct M8 -{ +/* 8 bits */ +typedef struct M8 { uint8_t M[8]; } M8; -typedef struct V8 -{ +typedef struct V8 { uint8_t V; } V8; -typedef struct Aff8 -{ +typedef struct Aff8 { M8 Mat; V8 Vec; } Aff8; -// 16 bits -typedef struct M16 -{ +/* 16 bits */ +typedef struct M16 { uint16_t M[16]; } M16; -typedef struct V16 -{ +typedef struct V16 { uint16_t V; } V16; -typedef 
struct Aff16 -{ +typedef struct Aff16 { M16 Mat; V16 Vec; } Aff16; -// 32 bits -typedef struct M32 -{ +/* 32 bits */ +typedef struct M32 { uint32_t M[32]; } M32; -typedef struct V32 -{ +typedef struct V32 { uint32_t V; } V32; -typedef struct Aff32 -{ +typedef struct Aff32 { M32 Mat; V32 Vec; } Aff32; -// 64 bits -typedef struct M64 -{ +/* 64 bits */ +typedef struct M64 { uint64_t M[64]; } M64; -typedef struct V64 -{ +typedef struct V64 { uint64_t V; } V64; -typedef struct Aff64 -{ +typedef struct Aff64 { M64 Mat; V64 Vec; } Aff64; -// 128 bits -typedef struct M128 -{ +/* 128 bits */ +typedef struct M128 { uint64_t M[128][2]; } M128; -typedef struct V128 -{ +typedef struct V128 { uint64_t V[2]; } V128; -typedef struct Aff128 -{ +typedef struct Aff128 { M128 Mat; V128 Vec; } Aff128; -// 256 bits -typedef struct M256 -{ +/* 256 bits */ +typedef struct M256 { uint64_t M[256][4]; } M256; -typedef struct V256 -{ +typedef struct V256 { uint64_t V[4]; } V256; -typedef struct Aff256 -{ +typedef struct Aff256 { M256 Mat; V256 Vec; } Aff256; -#endif \ No newline at end of file +#endif diff --git a/providers/implementations/ciphers/cipher_wbsm4.c b/providers/implementations/ciphers/cipher_wbsm4.c index e943fa7e8..14bc28142 100644 --- a/providers/implementations/ciphers/cipher_wbsm4.c +++ b/providers/implementations/ciphers/cipher_wbsm4.c @@ -14,7 +14,7 @@ #include "prov/implementations.h" #include "prov/providercommon.h" -// xiaolai +/* xiaolai */ static OSSL_FUNC_cipher_freectx_fn wbsm4_xiaolai_freectx; static OSSL_FUNC_cipher_dupctx_fn wbsm4_xiaolai_dupctx; @@ -35,8 +35,7 @@ static void *wbsm4_xiaolai_dupctx(void *ctx) return NULL; ret = OPENSSL_malloc(sizeof(*ret)); - if (ret == NULL) - { + if (ret == NULL) { ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return NULL; } @@ -46,17 +45,22 @@ static void *wbsm4_xiaolai_dupctx(void *ctx) } /* ossl_wbsm4_xiaolai1225984ecb_functions */ -IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ecb, ECB, 0, 1225984, 128, 0, block); 
+IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ecb, ECB, 0, 1225984, + 128, 0, block); /* ossl_wbsm4_xiaolai1225984cbc_functions */ -IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, cbc, CBC, 0, 1225984, 128, 128, block); +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, cbc, CBC, 0, 1225984, + 128, 128, block); /* ossl_wbsm4_xiaolai1225984ctr_functions */ -IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ctr, CTR, 0, 1225984, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ctr, CTR, 0, 1225984, + 8, 128, stream); /* ossl_wbsm4_xiaolai1225984ofb128_functions */ -IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ofb128, OFB, 0, 1225984, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, ofb128, OFB, 0, 1225984, + 8, 128, stream); /* ossl_wbsm4_xiaolai1225984cfb128_functions */ -IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, cfb128, CFB, 0, 1225984, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_xiaolai, WBSM4_XIAOLAI, cfb128, CFB, 0, 1225984, + 8, 128, stream); -// baiwu +/* baiwu */ static OSSL_FUNC_cipher_freectx_fn wbsm4_baiwu_freectx; static OSSL_FUNC_cipher_dupctx_fn wbsm4_baiwu_dupctx; @@ -77,8 +81,7 @@ static void *wbsm4_baiwu_dupctx(void *ctx) return NULL; ret = OPENSSL_malloc(sizeof(*ret)); - if (ret == NULL) - { + if (ret == NULL) { ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return NULL; } @@ -88,17 +91,22 @@ static void *wbsm4_baiwu_dupctx(void *ctx) } /* ossl_wbsm4_baiwu272638208ecb_functions */ -IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ecb, ECB, 0, 272638208, 128, 0, block); +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ecb, ECB, 0, 272638208, + 128, 0, block); /* ossl_wbsm4_baiwu272638208cbc_functions */ -IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, cbc, CBC, 0, 272638208, 128, 128, block); +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, cbc, CBC, 0, 272638208, + 128, 128, block); /* ossl_wbsm4_baiwu272638208ctr_functions */ 
-IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ctr, CTR, 0, 272638208, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ctr, CTR, 0, 272638208, + 8, 128, stream); /* ossl_wbsm4_baiwu272638208ofb128_functions */ -IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ofb128, OFB, 0, 272638208, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, ofb128, OFB, 0, 272638208, + 8, 128, stream); /* ossl_wbsm4_baiwu272638208cfb128_functions */ -IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, cfb128, CFB, 0, 272638208, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_baiwu, WBSM4_BAIWU, cfb128, CFB, 0, 272638208, + 8, 128, stream); -// wsise +/* wsise */ static OSSL_FUNC_cipher_freectx_fn wbsm4_wsise_freectx; static OSSL_FUNC_cipher_dupctx_fn wbsm4_wsise_dupctx; @@ -119,8 +127,7 @@ static void *wbsm4_wsise_dupctx(void *ctx) return NULL; ret = OPENSSL_malloc(sizeof(*ret)); - if (ret == NULL) - { + if (ret == NULL) { ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return NULL; } @@ -130,12 +137,17 @@ static void *wbsm4_wsise_dupctx(void *ctx) } /* ossl_wbsm4_wsise2274560ecb_functions */ -IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ecb, ECB, 0, 2274560, 128, 0, block); +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ecb, ECB, 0, 2274560, + 128, 0, block); /* ossl_wbsm4_wsise2274560cbc_functions */ -IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, cbc, CBC, 0, 2274560, 128, 128, block); +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, cbc, CBC, 0, 2274560, + 128, 128, block); /* ossl_wbsm4_wsise2274560ctr_functions */ -IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ctr, CTR, 0, 2274560, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ctr, CTR, 0, 2274560, + 8, 128, stream); /* ossl_wbsm4_wsise2274560ofb128_functions */ -IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ofb128, OFB, 0, 2274560, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, ofb128, OFB, 0, 2274560, + 8, 128, 
stream); /* ossl_wbsm4_wsise2274560cfb128_functions */ -IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, cfb128, CFB, 0, 2274560, 8, 128, stream); +IMPLEMENT_generic_cipher(wbsm4_wsise, WBSM4_WSISE, cfb128, CFB, 0, 2274560, + 8, 128, stream); diff --git a/providers/implementations/ciphers/cipher_wbsm4.h b/providers/implementations/ciphers/cipher_wbsm4.h index c602dd5cc..cd5cb3ec4 100644 --- a/providers/implementations/ciphers/cipher_wbsm4.h +++ b/providers/implementations/ciphers/cipher_wbsm4.h @@ -11,12 +11,10 @@ #include "prov/ciphercommon.h" #include "crypto/wbsm4.h" -// xiaolai -typedef struct -{ +/* xiaolai */ +typedef struct { PROV_CIPHER_CTX base; /* Must be first */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_xiaolai_key ks; } ks; @@ -28,12 +26,10 @@ const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_ctr(size_t keybits); const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_ofb128(size_t keybits); const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_cfb128(size_t keybits); -// baiwu -typedef struct -{ +/* baiwu */ +typedef struct { PROV_CIPHER_CTX base; /* Must be first */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_baiwu_key ks; } ks; @@ -45,12 +41,10 @@ const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_ctr(size_t keybits); const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_ofb128(size_t keybits); const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_cfb128(size_t keybits); -// wsise -typedef struct -{ +/* wsise */ +typedef struct { PROV_CIPHER_CTX base; /* Must be first */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_wsise_key ks; } ks; diff --git a/providers/implementations/ciphers/cipher_wbsm4_ccm.c b/providers/implementations/ciphers/cipher_wbsm4_ccm.c index 2d207a5c9..cc3766eb4 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_ccm.c +++ b/providers/implementations/ciphers/cipher_wbsm4_ccm.c @@ -14,7 +14,7 @@ #include "prov/implementations.h" #include "prov/providercommon.h" -// xiaolai +/* xiaolai */ static void 
*wbsm4_xiaolai_ccm_newctx(void *provctx, size_t keybits) { PROV_WBSM4_XIAOLAI_CCM_CTX *ctx; @@ -24,7 +24,8 @@ static void *wbsm4_xiaolai_ccm_newctx(void *provctx, size_t keybits) ctx = OPENSSL_zalloc(sizeof(*ctx)); if (ctx != NULL) - ossl_ccm_initctx(&ctx->base, keybits, ossl_prov_wbsm4_xiaolai_hw_ccm(keybits)); + ossl_ccm_initctx(&ctx->base, keybits, + ossl_prov_wbsm4_xiaolai_hw_ccm(keybits)); return ctx; } @@ -39,7 +40,7 @@ static void wbsm4_xiaolai_ccm_freectx(void *vctx) /* ossl_wbsm4_xiaolai1225984ccm_functions */ IMPLEMENT_aead_cipher(wbsm4_xiaolai, ccm, CCM, AEAD_FLAGS, 1225984, 8, 96); -// baiwu +/* baiwu */ static void *wbsm4_baiwu_ccm_newctx(void *provctx, size_t keybits) { PROV_WBSM4_BAIWU_CCM_CTX *ctx; @@ -49,7 +50,8 @@ static void *wbsm4_baiwu_ccm_newctx(void *provctx, size_t keybits) ctx = OPENSSL_zalloc(sizeof(*ctx)); if (ctx != NULL) - ossl_ccm_initctx(&ctx->base, keybits, ossl_prov_wbsm4_baiwu_hw_ccm(keybits)); + ossl_ccm_initctx(&ctx->base, keybits, + ossl_prov_wbsm4_baiwu_hw_ccm(keybits)); return ctx; } @@ -64,7 +66,7 @@ static void wbsm4_baiwu_ccm_freectx(void *vctx) /* ossl_wbsm4_baiwu272638208ccm_functions */ IMPLEMENT_aead_cipher(wbsm4_baiwu, ccm, CCM, AEAD_FLAGS, 272638208, 8, 96); -// wsise +/* wsise */ static void *wbsm4_wsise_ccm_newctx(void *provctx, size_t keybits) { PROV_WBSM4_WSISE_CCM_CTX *ctx; @@ -74,7 +76,8 @@ static void *wbsm4_wsise_ccm_newctx(void *provctx, size_t keybits) ctx = OPENSSL_zalloc(sizeof(*ctx)); if (ctx != NULL) - ossl_ccm_initctx(&ctx->base, keybits, ossl_prov_wbsm4_wsise_hw_ccm(keybits)); + ossl_ccm_initctx(&ctx->base, keybits, + ossl_prov_wbsm4_wsise_hw_ccm(keybits)); return ctx; } diff --git a/providers/implementations/ciphers/cipher_wbsm4_ccm.h b/providers/implementations/ciphers/cipher_wbsm4_ccm.h index 57e63f1e7..3b344d251 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_ccm.h +++ b/providers/implementations/ciphers/cipher_wbsm4_ccm.h @@ -12,12 +12,10 @@ #include "prov/ciphercommon.h" #include 
"prov/ciphercommon_ccm.h" -// xiaolai -typedef struct prov_wbsm4_xiaolai_ccm_ctx_st -{ +/* xiaolai */ +typedef struct prov_wbsm4_xiaolai_ccm_ctx_st { PROV_CCM_CTX base; /* must be first entry in struct */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_xiaolai_key ks; } ks; /* WBSM4 key schedule to use */ @@ -25,12 +23,10 @@ typedef struct prov_wbsm4_xiaolai_ccm_ctx_st const PROV_CCM_HW *ossl_prov_wbsm4_xiaolai_hw_ccm(size_t keybits); -// baiwu -typedef struct prov_wbsm4_baiwu_ccm_ctx_st -{ +/* baiwu */ +typedef struct prov_wbsm4_baiwu_ccm_ctx_st { PROV_CCM_CTX base; /* must be first entry in struct */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_baiwu_key ks; } ks; /* WBSM4 key schedule to use */ @@ -38,12 +34,10 @@ typedef struct prov_wbsm4_baiwu_ccm_ctx_st const PROV_CCM_HW *ossl_prov_wbsm4_baiwu_hw_ccm(size_t keybits); -// wsise -typedef struct prov_wbsm4_wsise_ccm_ctx_st -{ +/* wsise */ +typedef struct prov_wbsm4_wsise_ccm_ctx_st { PROV_CCM_CTX base; /* must be first entry in struct */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_wsise_key ks; } ks; /* WBSM4 key schedule to use */ diff --git a/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c b/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c index 4c90b201b..dcf04ad6c 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c +++ b/providers/implementations/ciphers/cipher_wbsm4_ccm_hw.c @@ -12,9 +12,9 @@ #include "cipher_wbsm4_ccm.h" -// xiaolai -static int wbsm4_xiaolai_ccm_initkey(PROV_CCM_CTX *ctx, const unsigned char *key, - size_t keylen) +/* xiaolai */ +static int wbsm4_xiaolai_ccm_initkey(PROV_CCM_CTX *ctx, + const unsigned char *key, size_t keylen) { PROV_WBSM4_XIAOLAI_CCM_CTX *actx = (PROV_WBSM4_XIAOLAI_CCM_CTX *)ctx; wbsm4_xiaolai_key *ks = &actx->ks.ks; @@ -35,14 +35,15 @@ static const PROV_CCM_HW wbsm4_xiaolai_ccm = { ossl_ccm_generic_setaad, ossl_ccm_generic_auth_encrypt, ossl_ccm_generic_auth_decrypt, - ossl_ccm_generic_gettag}; + ossl_ccm_generic_gettag +}; const 
PROV_CCM_HW *ossl_prov_wbsm4_xiaolai_hw_ccm(size_t keybits) { return &wbsm4_xiaolai_ccm; } -// baiwu +/* baiwu */ static int wbsm4_baiwu_ccm_initkey(PROV_CCM_CTX *ctx, const unsigned char *key, size_t keylen) { @@ -65,14 +66,15 @@ static const PROV_CCM_HW wbsm4_baiwu_ccm = { ossl_ccm_generic_setaad, ossl_ccm_generic_auth_encrypt, ossl_ccm_generic_auth_decrypt, - ossl_ccm_generic_gettag}; + ossl_ccm_generic_gettag +}; const PROV_CCM_HW *ossl_prov_wbsm4_baiwu_hw_ccm(size_t keybits) { return &wbsm4_baiwu_ccm; } -// wsise +/* wsise */ static int wbsm4_wsise_ccm_initkey(PROV_CCM_CTX *ctx, const unsigned char *key, size_t keylen) { @@ -95,7 +97,8 @@ static const PROV_CCM_HW wbsm4_wsise_ccm = { ossl_ccm_generic_setaad, ossl_ccm_generic_auth_encrypt, ossl_ccm_generic_auth_decrypt, - ossl_ccm_generic_gettag}; + ossl_ccm_generic_gettag +}; const PROV_CCM_HW *ossl_prov_wbsm4_wsise_hw_ccm(size_t keybits) { diff --git a/providers/implementations/ciphers/cipher_wbsm4_gcm.c b/providers/implementations/ciphers/cipher_wbsm4_gcm.c index bfb05e3ef..3a8c379f4 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_gcm.c +++ b/providers/implementations/ciphers/cipher_wbsm4_gcm.c @@ -14,7 +14,7 @@ #include "prov/implementations.h" #include "prov/providercommon.h" -// xiaolai +/* xiaolai */ static void *wbsm4_xiaolai_gcm_newctx(void *provctx, size_t keybits) { PROV_WBSM4_XIAOLAI_GCM_CTX *ctx; @@ -40,7 +40,7 @@ static void wbsm4_xiaolai_gcm_freectx(void *vctx) /* ossl_wbsm4_xiaolai1225984gcm_functions */ IMPLEMENT_aead_cipher(wbsm4_xiaolai, gcm, GCM, AEAD_FLAGS, 1225984, 8, 96); -// baiwu +/* baiwu */ static void *wbsm4_baiwu_gcm_newctx(void *provctx, size_t keybits) { PROV_WBSM4_BAIWU_GCM_CTX *ctx; @@ -66,7 +66,7 @@ static void wbsm4_baiwu_gcm_freectx(void *vctx) /* ossl_wbsm4_baiwu272638208gcm_functions */ IMPLEMENT_aead_cipher(wbsm4_baiwu, gcm, GCM, AEAD_FLAGS, 272638208, 8, 96); -// wsise +/* wsise */ static void *wbsm4_wsise_gcm_newctx(void *provctx, size_t keybits) { 
PROV_WBSM4_WSISE_GCM_CTX *ctx; diff --git a/providers/implementations/ciphers/cipher_wbsm4_gcm.h b/providers/implementations/ciphers/cipher_wbsm4_gcm.h index 8c2ed95b8..7ed1127d4 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_gcm.h +++ b/providers/implementations/ciphers/cipher_wbsm4_gcm.h @@ -12,12 +12,10 @@ #include "prov/ciphercommon.h" #include "prov/ciphercommon_gcm.h" -// xiaolai -typedef struct prov_wbsm4_xiaolai_gcm_ctx_st -{ +/* xiaolai */ +typedef struct prov_wbsm4_xiaolai_gcm_ctx_st { PROV_GCM_CTX base; /* must be first entry in struct */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_xiaolai_key ks; } ks; /* WBSM4 key schedule to use */ @@ -25,12 +23,10 @@ typedef struct prov_wbsm4_xiaolai_gcm_ctx_st const PROV_GCM_HW *ossl_prov_wbsm4_xiaolai_hw_gcm(size_t keybits); -// baiwu -typedef struct prov_wbsm4_baiwu_gcm_ctx_st -{ +/* baiwu */ +typedef struct prov_wbsm4_baiwu_gcm_ctx_st { PROV_GCM_CTX base; /* must be first entry in struct */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_baiwu_key ks; } ks; /* WBSM4 key schedule to use */ @@ -38,12 +34,10 @@ typedef struct prov_wbsm4_baiwu_gcm_ctx_st const PROV_GCM_HW *ossl_prov_wbsm4_baiwu_hw_gcm(size_t keybits); -// wsise -typedef struct prov_wbsm4_wsise_gcm_ctx_st -{ +/* wsise */ +typedef struct prov_wbsm4_wsise_gcm_ctx_st { PROV_GCM_CTX base; /* must be first entry in struct */ - union - { + union { OSSL_UNION_ALIGN; wbsm4_wsise_key ks; } ks; /* WBSM4 key schedule to use */ diff --git a/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c b/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c index 4bbce5c19..43bcf94bb 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c +++ b/providers/implementations/ciphers/cipher_wbsm4_gcm_hw.c @@ -12,9 +12,9 @@ #include "cipher_wbsm4_gcm.h" -// xiaolai -static int wbsm4_xiaolai_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key, - size_t keylen) +/* xiaolai */ +static int wbsm4_xiaolai_gcm_initkey(PROV_GCM_CTX *ctx, + const 
unsigned char *key, size_t keylen) { PROV_WBSM4_XIAOLAI_GCM_CTX *actx = (PROV_WBSM4_XIAOLAI_GCM_CTX *)ctx; wbsm4_xiaolai_key *ks = &actx->ks.ks; @@ -32,30 +32,22 @@ static int wbsm4_xiaolai_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key static int generic_wbsm4_xiaolai_gcm_cipher_update(PROV_GCM_CTX *ctx, const unsigned char *in, - size_t len, unsigned char *out) + size_t len, + unsigned char *out) { - if (ctx->enc) - { - if (ctx->ctr != NULL) - { + if (ctx->enc) { + if (ctx->ctr != NULL) { if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr)) return 0; - } - else - { + } else { if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len)) return 0; } - } - else - { - if (ctx->ctr != NULL) - { + } else { + if (ctx->ctr != NULL) { if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr)) return 0; - } - else - { + } else { if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len)) return 0; } @@ -69,14 +61,15 @@ static const PROV_GCM_HW wbsm4_xiaolai_gcm = { ossl_gcm_aad_update, generic_wbsm4_xiaolai_gcm_cipher_update, ossl_gcm_cipher_final, - ossl_gcm_one_shot}; + ossl_gcm_one_shot +}; const PROV_GCM_HW *ossl_prov_wbsm4_xiaolai_hw_gcm(size_t keybits) { return &wbsm4_xiaolai_gcm; } -// baiwu +/* baiwu */ static int wbsm4_baiwu_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key, size_t keylen) { @@ -98,28 +91,19 @@ static int generic_wbsm4_baiwu_gcm_cipher_update(PROV_GCM_CTX *ctx, const unsigned char *in, size_t len, unsigned char *out) { - if (ctx->enc) - { - if (ctx->ctr != NULL) - { + if (ctx->enc) { + if (ctx->ctr != NULL) { if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr)) return 0; - } - else - { + } else { if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len)) return 0; } - } - else - { - if (ctx->ctr != NULL) - { + } else { + if (ctx->ctr != NULL) { if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr)) return 0; - } - else - { + } else { if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len)) return 0; } @@ 
-133,14 +117,15 @@ static const PROV_GCM_HW wbsm4_baiwu_gcm = { ossl_gcm_aad_update, generic_wbsm4_baiwu_gcm_cipher_update, ossl_gcm_cipher_final, - ossl_gcm_one_shot}; + ossl_gcm_one_shot +}; const PROV_GCM_HW *ossl_prov_wbsm4_baiwu_hw_gcm(size_t keybits) { return &wbsm4_baiwu_gcm; } -// wsise +/* wsise */ static int wbsm4_wsise_gcm_initkey(PROV_GCM_CTX *ctx, const unsigned char *key, size_t keylen) { @@ -162,28 +147,19 @@ static int generic_wbsm4_wsise_gcm_cipher_update(PROV_GCM_CTX *ctx, const unsigned char *in, size_t len, unsigned char *out) { - if (ctx->enc) - { - if (ctx->ctr != NULL) - { + if (ctx->enc) { + if (ctx->ctr != NULL) { if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr)) return 0; - } - else - { + } else { if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len)) return 0; } - } - else - { - if (ctx->ctr != NULL) - { + } else { + if (ctx->ctr != NULL) { if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr)) return 0; - } - else - { + } else { if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len)) return 0; } @@ -197,7 +173,8 @@ static const PROV_GCM_HW wbsm4_wsise_gcm = { ossl_gcm_aad_update, generic_wbsm4_wsise_gcm_cipher_update, ossl_gcm_cipher_final, - ossl_gcm_one_shot}; + ossl_gcm_one_shot +}; const PROV_GCM_HW *ossl_prov_wbsm4_wsise_hw_gcm(size_t keybits) { diff --git a/providers/implementations/ciphers/cipher_wbsm4_hw.c b/providers/implementations/ciphers/cipher_wbsm4_hw.c index 80baa0be5..9cb381df3 100644 --- a/providers/implementations/ciphers/cipher_wbsm4_hw.c +++ b/providers/implementations/ciphers/cipher_wbsm4_hw.c @@ -10,39 +10,40 @@ #include "cipher_wbsm4.h" -// xiaolai +/* xiaolai */ static int cipher_hw_wbsm4_xiaolai_initkey(PROV_CIPHER_CTX *ctx, - const unsigned char *key, size_t keylen) + const unsigned char *key, + size_t keylen) { PROV_WBSM4_XIAOLAI_CTX *sctx = (PROV_WBSM4_XIAOLAI_CTX *)ctx; wbsm4_xiaolai_key *ks = &sctx->ks.ks; ctx->ks = ks; - if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && ctx->mode != 
EVP_CIPH_CBC_MODE)) - { + if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && + ctx->mode != EVP_CIPH_CBC_MODE)) { wbsm4_xiaolai_set_key(key, ks); ctx->block = (block128_f)wbsm4_xiaolai_encrypt; - } - else - { - wbsm4_xiaolai_set_key(key, ks); - // ctx->block = (block128_f)ossl_wbsm4_xiaolai_decrypt; + } else { + ERR_raise(ERR_LIB_EVP, EVP_R_BAD_DECRYPT); return 0; } return 1; } -IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_xiaolai_copyctx, PROV_WBSM4_XIAOLAI_CTX) - -#define PROV_CIPHER_HW_wbsm4_xiaolai_mode(mode) \ - static const PROV_CIPHER_HW wbsm4_xiaolai_##mode = { \ - cipher_hw_wbsm4_xiaolai_initkey, \ - ossl_cipher_hw_generic_##mode, \ - cipher_hw_wbsm4_xiaolai_copyctx}; \ - const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_xiaolai_##mode(size_t keybits) \ - { \ - return &wbsm4_xiaolai_##mode; \ +IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_xiaolai_copyctx, + PROV_WBSM4_XIAOLAI_CTX) + +#define PROV_CIPHER_HW_wbsm4_xiaolai_mode(mode) \ + static const PROV_CIPHER_HW wbsm4_xiaolai_##mode = { \ + cipher_hw_wbsm4_xiaolai_initkey, \ + ossl_cipher_hw_generic_##mode, \ + cipher_hw_wbsm4_xiaolai_copyctx \ + }; \ + const PROV_CIPHER_HW * \ + ossl_prov_cipher_hw_wbsm4_xiaolai_##mode(size_t keybits) \ + { \ + return &wbsm4_xiaolai_##mode; \ } PROV_CIPHER_HW_wbsm4_xiaolai_mode(cbc); @@ -51,39 +52,40 @@ PROV_CIPHER_HW_wbsm4_xiaolai_mode(ofb128); PROV_CIPHER_HW_wbsm4_xiaolai_mode(cfb128); PROV_CIPHER_HW_wbsm4_xiaolai_mode(ctr); -// baiwu +/* baiwu */ static int cipher_hw_wbsm4_baiwu_initkey(PROV_CIPHER_CTX *ctx, - const unsigned char *key, size_t keylen) + const unsigned char *key, + size_t keylen) { PROV_WBSM4_BAIWU_CTX *sctx = (PROV_WBSM4_BAIWU_CTX *)ctx; wbsm4_baiwu_key *ks = &sctx->ks.ks; ctx->ks = ks; - if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && ctx->mode != EVP_CIPH_CBC_MODE)) - { + if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && + ctx->mode != EVP_CIPH_CBC_MODE)) { wbsm4_baiwu_set_key(key, ks); ctx->block = (block128_f)wbsm4_baiwu_encrypt; - } - else - { - 
wbsm4_baiwu_set_key(key, ks); - // ctx->block = (block128_f)ossl_wbsm4_baiwu_decrypt; + } else { + ERR_raise(ERR_LIB_EVP, EVP_R_BAD_DECRYPT); return 0; } return 1; } -IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_baiwu_copyctx, PROV_WBSM4_BAIWU_CTX) - -#define PROV_CIPHER_HW_wbsm4_baiwu_mode(mode) \ - static const PROV_CIPHER_HW wbsm4_baiwu_##mode = { \ - cipher_hw_wbsm4_baiwu_initkey, \ - ossl_cipher_hw_generic_##mode, \ - cipher_hw_wbsm4_baiwu_copyctx}; \ - const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_baiwu_##mode(size_t keybits) \ - { \ - return &wbsm4_baiwu_##mode; \ +IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_baiwu_copyctx, + PROV_WBSM4_BAIWU_CTX) + +#define PROV_CIPHER_HW_wbsm4_baiwu_mode(mode) \ + static const PROV_CIPHER_HW wbsm4_baiwu_##mode = { \ + cipher_hw_wbsm4_baiwu_initkey, \ + ossl_cipher_hw_generic_##mode, \ + cipher_hw_wbsm4_baiwu_copyctx \ + }; \ + const PROV_CIPHER_HW * \ + ossl_prov_cipher_hw_wbsm4_baiwu_##mode(size_t keybits) \ + { \ + return &wbsm4_baiwu_##mode; \ } PROV_CIPHER_HW_wbsm4_baiwu_mode(cbc); @@ -92,39 +94,43 @@ PROV_CIPHER_HW_wbsm4_baiwu_mode(ofb128); PROV_CIPHER_HW_wbsm4_baiwu_mode(cfb128); PROV_CIPHER_HW_wbsm4_baiwu_mode(ctr); -// wsise +/* wsise */ static int cipher_hw_wbsm4_wsise_initkey(PROV_CIPHER_CTX *ctx, - const unsigned char *key, size_t keylen) + const unsigned char *key, + size_t keylen) { PROV_WBSM4_WSISE_CTX *sctx = (PROV_WBSM4_WSISE_CTX *)ctx; wbsm4_wsise_key *ks = &sctx->ks.ks; ctx->ks = ks; - if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && ctx->mode != EVP_CIPH_CBC_MODE)) + if (ctx->enc || (ctx->mode != EVP_CIPH_ECB_MODE && + ctx->mode != EVP_CIPH_CBC_MODE)) { wbsm4_wsise_set_key(key, ks); ctx->block = (block128_f)wbsm4_wsise_encrypt; } else { - wbsm4_wsise_set_key(key, ks); - // ctx->block = (block128_f)ossl_wbsm4_wsise_decrypt; + ERR_raise(ERR_LIB_EVP, EVP_R_BAD_DECRYPT); return 0; } return 1; } -IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_wsise_copyctx, PROV_WBSM4_WSISE_CTX) - -#define 
PROV_CIPHER_HW_wbsm4_wsise_mode(mode) \ - static const PROV_CIPHER_HW wbsm4_wsise_##mode = { \ - cipher_hw_wbsm4_wsise_initkey, \ - ossl_cipher_hw_generic_##mode, \ - cipher_hw_wbsm4_wsise_copyctx}; \ - const PROV_CIPHER_HW *ossl_prov_cipher_hw_wbsm4_wsise_##mode(size_t keybits) \ - { \ - return &wbsm4_wsise_##mode; \ +IMPLEMENT_CIPHER_HW_COPYCTX(cipher_hw_wbsm4_wsise_copyctx, + PROV_WBSM4_WSISE_CTX) + +#define PROV_CIPHER_HW_wbsm4_wsise_mode(mode) \ + static const PROV_CIPHER_HW wbsm4_wsise_##mode = { \ + cipher_hw_wbsm4_wsise_initkey, \ + ossl_cipher_hw_generic_##mode, \ + cipher_hw_wbsm4_wsise_copyctx \ + }; \ + const PROV_CIPHER_HW * \ + ossl_prov_cipher_hw_wbsm4_wsise_##mode(size_t keybits) \ + { \ + return &wbsm4_wsise_##mode; \ } PROV_CIPHER_HW_wbsm4_wsise_mode(cbc); diff --git a/providers/implementations/kdfs/wbsm4kdf.c b/providers/implementations/kdfs/wbsm4kdf.c index ff5335579..837026328 100644 --- a/providers/implementations/kdfs/wbsm4kdf.c +++ b/providers/implementations/kdfs/wbsm4kdf.c @@ -26,8 +26,7 @@ static OSSL_FUNC_kdf_set_ctx_params_fn kdf_wbsm4_set_ctx_params; static OSSL_FUNC_kdf_gettable_ctx_params_fn kdf_wbsm4_gettable_ctx_params; static OSSL_FUNC_kdf_get_ctx_params_fn kdf_wbsm4_get_ctx_params; -typedef struct -{ +typedef struct { void *provctx; unsigned char *key; size_t key_len; @@ -43,8 +42,7 @@ static void *kdf_wbsm4_new(void *provctx) return NULL; ctx = OPENSSL_zalloc(sizeof(*ctx)); - if (ctx == NULL) - { + if (ctx == NULL) { ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return NULL; } @@ -65,8 +63,7 @@ static void kdf_wbsm4_free(void *vctx) { KDF_WBSM4 *ctx = (KDF_WBSM4 *)vctx; - if (ctx != NULL) - { + if (ctx != NULL) { kdf_wbsm4_cleanup(ctx); OPENSSL_free(ctx); } @@ -88,16 +85,12 @@ static int kdf_wbsm4_set_membuf(unsigned char **buffer, size_t *buflen, *buffer = NULL; *buflen = 0; - if (p->data_size == 0) - { - if ((*buffer = OPENSSL_zalloc(1)) == NULL) - { + if (p->data_size == 0) { + if ((*buffer = OPENSSL_zalloc(1)) == NULL) { 
ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return 0; } - } - else if (p->data != NULL) - { + } else if (p->data != NULL) { if (!OSSL_PARAM_get_utf8_string(p, (char **)buffer, *buflen)) return 0; } @@ -112,49 +105,42 @@ static int kdf_wbsm4_derive(void *vctx, unsigned char *key, size_t keylen, if (!ossl_prov_is_running() || !kdf_wbsm4_set_ctx_params(ctx, params)) return 0; - if (ctx->cipher == NULL) - { + if (ctx->cipher == NULL) { ERR_raise(ERR_LIB_PROV, PROV_R_MISSING_CIPHER); return 0; } - if (ctx->key == NULL) - { + if (ctx->key == NULL) { ERR_raise(ERR_LIB_PROV, PROV_R_MISSING_KEY); return 0; } - if (ctx->key_len != 32) - { + if (ctx->key_len != 32) { ERR_raise(ERR_LIB_PROV, PROV_R_INVALID_KEY_LENGTH); return 0; } unsigned char sm4key[16]; size_t sm4key_len = sizeof(sm4key); - if (!OPENSSL_hexstr2buf_ex(sm4key, sm4key_len, &sm4key_len, (const char *)ctx->key, 0)) - { + if (!OPENSSL_hexstr2buf_ex(sm4key, sm4key_len, &sm4key_len, + (const char *)ctx->key, 0)) { ERR_raise(ERR_LIB_PROV, PROV_R_INVALID_KEY); return 0; } - if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-XIAOLAI") == 0) - { - if (keylen != sizeof(wbsm4_xiaolai_key)) - { + if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-XIAOLAI") == 0) { + if (keylen != sizeof(wbsm4_xiaolai_key)) { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, PROV_R_BAD_LENGTH); return 0; } - if (key == NULL) - { + if (key == NULL) { OPENSSL_cleanse(sm4key, sm4key_len); return 1; } wbsm4_xiaolai_key *wbsm4key = OPENSSL_zalloc(sizeof(wbsm4_xiaolai_key)); - if (wbsm4key == NULL) - { + if (wbsm4key == NULL) { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return 0; @@ -167,25 +153,20 @@ static int kdf_wbsm4_derive(void *vctx, unsigned char *key, size_t keylen, OPENSSL_cleanse(wbsm4key, sizeof(wbsm4_xiaolai_key)); return 1; - } - else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-BAIWU") == 0) - { - if (keylen != sizeof(wbsm4_baiwu_key)) - { + } else if 
(OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-BAIWU") == 0) { + if (keylen != sizeof(wbsm4_baiwu_key)) { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, PROV_R_BAD_LENGTH); return 0; } - if (key == NULL) - { + if (key == NULL) { OPENSSL_cleanse(sm4key, sm4key_len); return 1; } wbsm4_baiwu_key *wbsm4key = OPENSSL_zalloc(sizeof(wbsm4_baiwu_key)); - if (wbsm4key == NULL) - { + if (wbsm4key == NULL) { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return 0; @@ -198,25 +179,20 @@ static int kdf_wbsm4_derive(void *vctx, unsigned char *key, size_t keylen, OPENSSL_cleanse(wbsm4key, sizeof(wbsm4_baiwu_key)); return 1; - } - else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-WSISE") == 0) - { - if (keylen != sizeof(wbsm4_wsise_key)) - { + } else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-WSISE") == 0) { + if (keylen != sizeof(wbsm4_wsise_key)) { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, PROV_R_BAD_LENGTH); return 0; } - if (key == NULL) - { + if (key == NULL) { OPENSSL_cleanse(sm4key, sm4key_len); return 1; } wbsm4_wsise_key *wbsm4key = OPENSSL_zalloc(sizeof(wbsm4_wsise_key)); - if (wbsm4key == NULL) - { + if (wbsm4key == NULL) { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); return 0; @@ -229,9 +205,7 @@ static int kdf_wbsm4_derive(void *vctx, unsigned char *key, size_t keylen, OPENSSL_cleanse(wbsm4key, sizeof(wbsm4_wsise_key)); return 1; - } - else - { + } else { OPENSSL_cleanse(sm4key, sm4key_len); ERR_raise(ERR_LIB_PROV, PROV_R_MISSING_CIPHER); return 0; @@ -243,15 +217,13 @@ static int kdf_wbsm4_set_ctx_params(void *vctx, const OSSL_PARAM params[]) const OSSL_PARAM *p; KDF_WBSM4 *ctx = vctx; - if ((p = OSSL_PARAM_locate_const(params, OSSL_KDF_PARAM_KEY)) != NULL) - { + if ((p = OSSL_PARAM_locate_const(params, OSSL_KDF_PARAM_KEY)) != NULL) { if (!kdf_wbsm4_set_membuf(&ctx->key, &ctx->key_len, p)) return 0; ctx->key_len = strlen((char *)ctx->key); } - 
if ((p = OSSL_PARAM_locate_const(params, OSSL_KDF_PARAM_CIPHER)) != NULL) - { + if ((p = OSSL_PARAM_locate_const(params, OSSL_KDF_PARAM_CIPHER)) != NULL) { if (!kdf_wbsm4_set_membuf(&ctx->cipher, &ctx->cipher_len, p)) return 0; ctx->cipher_len = strlen((char *)ctx->cipher); @@ -266,7 +238,8 @@ static const OSSL_PARAM *kdf_wbsm4_settable_ctx_params(ossl_unused void *ctx, static const OSSL_PARAM known_settable_ctx_params[] = { OSSL_PARAM_utf8_string(OSSL_KDF_PARAM_KEY, NULL, 0), OSSL_PARAM_utf8_string(OSSL_KDF_PARAM_CIPHER, NULL, 0), - OSSL_PARAM_END}; + OSSL_PARAM_END + }; return known_settable_ctx_params; } @@ -277,8 +250,7 @@ static int kdf_wbsm4_get_ctx_params(void *vctx, OSSL_PARAM params[]) OSSL_PARAM *p; size_t keylen = 0; - if ((p = OSSL_PARAM_locate(params, OSSL_KDF_PARAM_SIZE)) != NULL) - { + if ((p = OSSL_PARAM_locate(params, OSSL_KDF_PARAM_SIZE)) != NULL) { if (ctx->cipher == NULL) keylen = 0; else if (OPENSSL_strcasecmp((char *)ctx->cipher, "WBSM4-XIAOLAI") == 0) @@ -300,7 +272,8 @@ static const OSSL_PARAM *kdf_wbsm4_gettable_ctx_params(ossl_unused void *ctx, { static const OSSL_PARAM known_gettable_ctx_params[] = { OSSL_PARAM_size_t(OSSL_KDF_PARAM_SIZE, NULL), - OSSL_PARAM_END}; + OSSL_PARAM_END + }; return known_gettable_ctx_params; } @@ -315,5 +288,6 @@ const OSSL_DISPATCH ossl_kdf_wbsm4_functions[] = { {OSSL_FUNC_KDF_GETTABLE_CTX_PARAMS, (void (*)(void))kdf_wbsm4_gettable_ctx_params}, {OSSL_FUNC_KDF_GET_CTX_PARAMS, (void (*)(void))kdf_wbsm4_get_ctx_params}, - {0, NULL}}; + {0, NULL} +}; #endif diff --git a/test/wbsm4_internal_test.c b/test/wbsm4_internal_test.c index b8d10711f..66f534a94 100644 --- a/test/wbsm4_internal_test.c +++ b/test/wbsm4_internal_test.c @@ -40,16 +40,8 @@ static int test_wbsm4_Xiao_Lai(void) 0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e, 0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46}; - /* - * This test vector comes from Example 2 from GB/T 32907-2016, - * and described in Internet Draft draft-ribose-cfrg-sm4-02. 
- * After 1,000,000 iterations. - */ - static const uint8_t expected_iter[SM4_BLOCK_SIZE] = { - 0x59, 0x52, 0x98, 0xc7, 0xc6, 0xfd, 0x27, 0x1f, - 0x04, 0x02, 0xf8, 0x04, 0xc3, 0x3d, 0x3f, 0x66}; - - wbsm4_xiaolai_key *wbsm4_key = (wbsm4_xiaolai_key *)malloc(sizeof(wbsm4_xiaolai_key)); + wbsm4_xiaolai_key *wbsm4_key = + (wbsm4_xiaolai_key *)malloc(sizeof(wbsm4_xiaolai_key)); if (wbsm4_key == NULL) return 0; memset(wbsm4_key, 0, sizeof(wbsm4_xiaolai_key)); @@ -60,15 +52,13 @@ static int test_wbsm4_Xiao_Lai(void) memcpy(block, input, SM4_BLOCK_SIZE); wbsm4_xiaolai_encrypt(block, block, wbsm4_key); - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { free(wbsm4_key); return 0; } unsigned char *keybuf = (unsigned char *)malloc(sizeof(wbsm4_xiaolai_key)); - if (!TEST_ptr_ne(keybuf, NULL)) - { + if (!TEST_ptr_ne(keybuf, NULL)) { free(wbsm4_key); return 0; } @@ -78,47 +68,33 @@ static int test_wbsm4_Xiao_Lai(void) memcpy(block, input, SM4_BLOCK_SIZE); wbsm4_xiaolai_encrypt(block, block, wbsm4_key); - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { free(wbsm4_key); free(keybuf); return 0; } - // int i; - // for (i = 0; i != 999999; ++i) - // wbsm4_xiaolai_encrypt(block, block, wbsm4_key); - - // if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected_iter, SM4_BLOCK_SIZE)) - // return 0; - // return 1; - (void)expected_iter; - const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-XIAOLAI-ECB"); - if (!TEST_ptr_ne(cipher, NULL)) - { + if (!TEST_ptr_ne(cipher, NULL)) { free(wbsm4_key); return 0; } int key_length = EVP_CIPHER_get_key_length(cipher); - if (!TEST_int_eq(key_length, sizeof(wbsm4_xiaolai_key))) - { + if (!TEST_int_eq(key_length, sizeof(wbsm4_xiaolai_key))) { free(wbsm4_key); free(keybuf); return 0; } EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new(); - if (!TEST_ptr_ne(cipher_ctx, NULL)) 
- { + if (!TEST_ptr_ne(cipher_ctx, NULL)) { free(wbsm4_key); free(keybuf); return 0; } int ret = EVP_EncryptInit(cipher_ctx, cipher, (unsigned char *)keybuf, NULL); - if (!TEST_int_eq(ret, 1)) - { + if (!TEST_int_eq(ret, 1)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); @@ -128,15 +104,13 @@ static int test_wbsm4_Xiao_Lai(void) int outl = SM4_BLOCK_SIZE; memcpy(block, input, SM4_BLOCK_SIZE); ret = EVP_EncryptUpdate(cipher_ctx, block, &outl, block, SM4_BLOCK_SIZE); - if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) - { + if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); return 0; } - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); @@ -167,16 +141,8 @@ static int test_wbsm4_Bai_Wu(void) 0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e, 0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46}; - /* - * This test vector comes from Example 2 from GB/T 32907-2016, - * and described in Internet Draft draft-ribose-cfrg-sm4-02. - * After 1,000,000 iterations. 
- */ - static const uint8_t expected_iter[SM4_BLOCK_SIZE] = { - 0x59, 0x52, 0x98, 0xc7, 0xc6, 0xfd, 0x27, 0x1f, - 0x04, 0x02, 0xf8, 0x04, 0xc3, 0x3d, 0x3f, 0x66}; - - wbsm4_baiwu_key *wbsm4_key = (wbsm4_baiwu_key *)malloc(sizeof(wbsm4_baiwu_key)); + wbsm4_baiwu_key *wbsm4_key = + (wbsm4_baiwu_key *)malloc(sizeof(wbsm4_baiwu_key)); if (wbsm4_key == NULL) return 0; memset(wbsm4_key, 0, sizeof(wbsm4_baiwu_key)); @@ -187,15 +153,13 @@ static int test_wbsm4_Bai_Wu(void) memcpy(block, input, SM4_BLOCK_SIZE); wbsm4_baiwu_encrypt(block, block, wbsm4_key); - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { free(wbsm4_key); return 0; } unsigned char *keybuf = (unsigned char *)malloc(sizeof(wbsm4_baiwu_key)); - if (!TEST_ptr_ne(keybuf, NULL)) - { + if (!TEST_ptr_ne(keybuf, NULL)) { free(wbsm4_key); return 0; } @@ -205,47 +169,33 @@ static int test_wbsm4_Bai_Wu(void) memcpy(block, input, SM4_BLOCK_SIZE); wbsm4_baiwu_encrypt(block, block, wbsm4_key); - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { free(wbsm4_key); free(keybuf); return 0; } - // int i; - // for (i = 0; i != 999999; ++i) - // wbsm4_baiwu_encrypt(block, block, wbsm4_key); - - // if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected_iter, SM4_BLOCK_SIZE)) - // return 0; - // return 1; - (void)expected_iter; - const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-BAIWU-ECB"); - if (!TEST_ptr_ne(cipher, NULL)) - { + if (!TEST_ptr_ne(cipher, NULL)) { free(wbsm4_key); return 0; } int key_length = EVP_CIPHER_get_key_length(cipher); - if (!TEST_int_eq(key_length, sizeof(wbsm4_baiwu_key))) - { + if (!TEST_int_eq(key_length, sizeof(wbsm4_baiwu_key))) { free(wbsm4_key); free(keybuf); return 0; } EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new(); - if (!TEST_ptr_ne(cipher_ctx, NULL)) - { + if (!TEST_ptr_ne(cipher_ctx, NULL)) { free(wbsm4_key); 
free(keybuf); return 0; } int ret = EVP_EncryptInit(cipher_ctx, cipher, (unsigned char *)keybuf, NULL); - if (!TEST_int_eq(ret, 1)) - { + if (!TEST_int_eq(ret, 1)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); @@ -255,15 +205,13 @@ static int test_wbsm4_Bai_Wu(void) int outl = SM4_BLOCK_SIZE; memcpy(block, input, SM4_BLOCK_SIZE); ret = EVP_EncryptUpdate(cipher_ctx, block, &outl, block, SM4_BLOCK_SIZE); - if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) - { + if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); return 0; } - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); @@ -294,16 +242,8 @@ static int test_wbsm4_WSISE(void) 0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e, 0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46}; - /* - * This test vector comes from Example 2 from GB/T 32907-2016, - * and described in Internet Draft draft-ribose-cfrg-sm4-02. - * After 1,000,000 iterations. 
- */ - static const uint8_t expected_iter[SM4_BLOCK_SIZE] = { - 0x59, 0x52, 0x98, 0xc7, 0xc6, 0xfd, 0x27, 0x1f, - 0x04, 0x02, 0xf8, 0x04, 0xc3, 0x3d, 0x3f, 0x66}; - - wbsm4_wsise_key *wbsm4_key = (wbsm4_wsise_key *)malloc(sizeof(wbsm4_wsise_key)); + wbsm4_wsise_key *wbsm4_key = + (wbsm4_wsise_key *)malloc(sizeof(wbsm4_wsise_key)); if (wbsm4_key == NULL) return 0; memset(wbsm4_key, 0, sizeof(wbsm4_wsise_key)); @@ -314,15 +254,13 @@ static int test_wbsm4_WSISE(void) memcpy(block, input, SM4_BLOCK_SIZE); wbsm4_wsise_encrypt(block, block, wbsm4_key); - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { free(wbsm4_key); return 0; } unsigned char *keybuf = (unsigned char *)malloc(sizeof(wbsm4_wsise_key)); - if (!TEST_ptr_ne(keybuf, NULL)) - { + if (!TEST_ptr_ne(keybuf, NULL)) { free(wbsm4_key); return 0; } @@ -332,47 +270,33 @@ static int test_wbsm4_WSISE(void) memcpy(block, input, SM4_BLOCK_SIZE); wbsm4_wsise_encrypt(block, block, wbsm4_key); - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { free(wbsm4_key); free(keybuf); return 0; } - // int i; - // for (i = 0; i != 999999; ++i) - // wbsm4_wsise_encrypt(block, block, wbsm4_key); - - // if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected_iter, SM4_BLOCK_SIZE)) - // return 0; - // return 1; - (void)expected_iter; - const EVP_CIPHER *cipher = EVP_get_cipherbyname("WBSM4-WSISE-ECB"); - if (!TEST_ptr_ne(cipher, NULL)) - { + if (!TEST_ptr_ne(cipher, NULL)) { free(wbsm4_key); return 0; } int key_length = EVP_CIPHER_get_key_length(cipher); - if (!TEST_int_eq(key_length, sizeof(wbsm4_wsise_key))) - { + if (!TEST_int_eq(key_length, sizeof(wbsm4_wsise_key))) { free(wbsm4_key); free(keybuf); return 0; } EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new(); - if (!TEST_ptr_ne(cipher_ctx, NULL)) - { + if (!TEST_ptr_ne(cipher_ctx, NULL)) { free(wbsm4_key); 
free(keybuf); return 0; } int ret = EVP_EncryptInit(cipher_ctx, cipher, (unsigned char *)keybuf, NULL); - if (!TEST_int_eq(ret, 1)) - { + if (!TEST_int_eq(ret, 1)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); @@ -382,15 +306,13 @@ static int test_wbsm4_WSISE(void) int outl = SM4_BLOCK_SIZE; memcpy(block, input, SM4_BLOCK_SIZE); ret = EVP_EncryptUpdate(cipher_ctx, block, &outl, block, SM4_BLOCK_SIZE); - if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) - { + if (!TEST_int_eq(ret, 1) && !TEST_int_eq(outl, 16)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf); return 0; } - if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) - { + if (!TEST_mem_eq(block, SM4_BLOCK_SIZE, expected, SM4_BLOCK_SIZE)) { EVP_CIPHER_CTX_free(cipher_ctx); free(wbsm4_key); free(keybuf);