X-Git-Url: https://bilbo.iut-bm.univ-fcomte.fr/and/gitweb/Cipher_code.git/blobdiff_plain/a1bf55298885bab573938b35a5738bbf54b717c6..e0ff6be80e2df6093b167083fe31538b9c97e470:/LightweightARM/LWARM/lwarm.cpp?ds=sidebyside

diff --git a/LightweightARM/LWARM/lwarm.cpp b/LightweightARM/LWARM/lwarm.cpp
index 165fb19..13d8bba 100644
--- a/LightweightARM/LWARM/lwarm.cpp
+++ b/LightweightARM/LWARM/lwarm.cpp
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include 
 /*#include 
 #include 
@@ -74,7 +75,6 @@ void inverse_tables_int(int *tab, int size_tab,int *inv_perm_tabs) {
   for(int i=0;i[...]
[...]
+      if (Nk > 6 && i % Nk == 4)
+      {
+        // Function Subword()
+        {
+          temp[0]=sbox[temp[0]];
+          temp[1]=sbox[temp[1]];
+          temp[2]=sbox[temp[2]];
+          temp[3]=sbox[temp[3]];
+        }
+      }
+      RoundKey[i*4+0] = RoundKey[(i-Nk)*4+0] ^ temp[0];
+      RoundKey[i*4+1] = RoundKey[(i-Nk)*4+1] ^ temp[1];
+      RoundKey[i*4+2] = RoundKey[(i-Nk)*4+2] ^ temp[2];
+      RoundKey[i*4+3] = RoundKey[(i-Nk)*4+3] ^ temp[3];
+      i++;
+    }
+
+    printf("i %d\n",i);
+}
+
+
+#define key_size 256
+
+uint8_t enc_key[key_size];
+
+
+void AES_encrypt(const uint8_t *in, uint8_t *out, const uint8x16_t *rdkeys, unsigned int rounds, int val_i,char** target)
+{
+
+  // Load the block
+  uint8x16_t data = vld1q_u8(in);
+  uint8x16_t tmp;
+
+  uint64x2_t v1=vdupq_n_u64(val_i);
+
+  uint8x16_t key=reinterpret_cast<uint8x16_t>(v1);
+
+  tmp = veorq_u8(key, rdkeys[0]);
+  /*
+  if(val_i<1) {
+    static uint8_t p[16];
+
+    vst1q_u8 (p, key);
+
+    for(int i=0;i<16;i++)
+      *target += sprintf(*target, "%d ", p[i]);
+    *target += sprintf(*target, "\n ");
+
+    vst1q_u8 (p, rdkeys[0]);
+
+    for(int i=0;i<16;i++)
+      *target += sprintf(*target, "%d ", p[i]);
+    *target += sprintf(*target, "\n ");
+
+  }
+  */
+
+  // AES encryption with ARM intrinsics:
+  // rnds-1 (9 for AES128) cycles of AES:
+  // (Add, Shift, Sub) plus Mix Columns
+  unsigned int i;
+  for (i=1; i[...]
[...]
+      [...](v1);
+      print128_num(keygened);
+      // cout<<"keygened "<[...]
[...]
+  uint8x16_t key1=reinterpret_cast<uint8x16_t>(v1);
+  v1=vdupq_n_u64(val_i+1);
+  uint8x16_t key2=reinterpret_cast<uint8x16_t>(v1);
+  v1=vdupq_n_u64(val_i+2);
+  uint8x16_t key3=reinterpret_cast<uint8x16_t>(v1);
+  v1=vdupq_n_u64(val_i+3);
+  uint8x16_t key4=reinterpret_cast<uint8x16_t>(v1);
+
+  tmp1 = veorq_u8(key1, rdkeys[0]);
+  tmp2 = veorq_u8(key2, rdkeys[0]);
+  tmp3 = veorq_u8(key3, rdkeys[0]);
+  tmp4 = veorq_u8(key4, rdkeys[0]);
+
+
+  // AES encryption with ARM intrinsics:
+  // rnds-1 (9 for AES128) cycles of AES:
+  // (Add, Shift, Sub) plus Mix Columns
+  unsigned int i;
+  for (i=1; i[...]
[...]
-          encrypt<4*4>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,1);
-        else
-          encrypt<4*4>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,0);
-
-      }
-      break;
-    case 8:
-      for(i=0;i[...]) {
-        if([...])
-          encrypt<8*8>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,1);
-        else
-          encrypt<8*8>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,0);
-
-      }
-      break;
-    case 16:
-      for(i=0;i[...]) {
-        if([...])
-          encrypt<16*16>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,1);
-        else
-          encrypt<16*16>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,0);
-
-      }
-      break;
-    case 32:
-      for(i=0;i[...]) {
-        if([...])
-          encrypt<32*32>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,1);
-        else
-          encrypt<32*32>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,0);
-
-      }
-      break;
-    case 64:
-      for(i=0;i[...]) {
-        if([...])
-          encrypt<64*64>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,1);
-        else
-          encrypt<64*64>(seq, seq2,len,RM1,Pbox,PboxRM,Sbox1,Sbox2,0);
-
+
+  const int ROUNDS=10;
+  for(int a=0;a[...]
[...]
-          decrypt<4*4>(seq2, seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-        else
-          decrypt<4*4>(seq2,seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-      }
-      break;
-    case 8:
-      for(i=0;i[...]) {
-        if([...])
-          decrypt<8*8>(seq2, seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-        else
-          decrypt<8*8>(seq2,seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-      }
-      break;
-    case 16:
-      for(i=0;i[...]) {
-        if([...])
-          decrypt<16*16>(seq2, seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-        else
-          decrypt<16*16>(seq2,seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-      }
-      break;
-    case 32:
-      for(i=0;i[...]) {
-        if([...])
-          decrypt<32*32>(seq2, seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-        else
-          decrypt<32*32>(seq2,seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-      }
-      break;
-    case 64:
-      for(i=0;i[...]) {
-        if([...])
-          decrypt<64*64>(seq2, seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-        else
-          decrypt<64*64>(seq2,seq,len,RM2,Pbox,PboxRM,Sbox1,Sbox2,0);
-      }
-      break;
+
+  for(int a=0;a[...]
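
For reference, a minimal sketch (not taken from this commit) of the round structure that the "AES encryption with ARM intrinsics" comments in the added AES_encrypt describe, assuming AES-128 with the expanded key packed into eleven uint8x16_t round keys. The helper names (aes128_block, load_round_keys) and the packing of KeyExpansion's RoundKey bytes into rdkeys-style vectors are assumptions; the code needs a crypto-capable target (e.g. -march=armv8-a+crypto) to build.

#include <arm_neon.h>
#include <stdint.h>

/* One 16-byte block with the ARMv8 Crypto Extension intrinsics:
   vaeseq_u8 = AddRoundKey + SubBytes + ShiftRows, vaesmcq_u8 = MixColumns. */
static inline uint8x16_t aes128_block(uint8x16_t block, const uint8x16_t rk[11])
{
    for (int r = 0; r < 9; r++)                   /* rounds 1..9 */
        block = vaesmcq_u8(vaeseq_u8(block, rk[r]));
    block = vaeseq_u8(block, rk[9]);              /* round 10: no MixColumns */
    return veorq_u8(block, rk[10]);               /* final AddRoundKey */
}

/* Hypothetical helper: pack a flat RoundKey byte array (as filled by a
   KeyExpansion of the kind added in this diff) into uint8x16_t round keys. */
static inline void load_round_keys(const uint8_t *RoundKey, uint8x16_t rk[11])
{
    for (int r = 0; r < 11; r++)
        rk[r] = vld1q_u8(RoundKey + 16 * r);
}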
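
The four-lane code added above (key1..key4 and tmp1..tmp4 built from val_i..val_i+3) looks like a counter-mode variant that keeps four AES blocks in flight so the AESE/AESMC sequences of the lanes can overlap. A hedged sketch of that interleaving, continuing from the includes and rk layout of the previous sketch (aes128_ctr4 and its argument names are assumptions, not the commit's function):

/* 4-way interleaved CTR sketch: four counter blocks val_i .. val_i+3 are
   expanded to keystream in parallel, then XORed with 64 bytes of input.
   The counter block is built as in the diff: the 64-bit counter duplicated
   into both lanes and reinterpreted as 16 bytes. */
static inline void aes128_ctr4(const uint8_t *in, uint8_t *out,
                               const uint8x16_t rk[11], uint64_t val_i)
{
    uint8x16_t t1 = vreinterpretq_u8_u64(vdupq_n_u64(val_i));
    uint8x16_t t2 = vreinterpretq_u8_u64(vdupq_n_u64(val_i + 1));
    uint8x16_t t3 = vreinterpretq_u8_u64(vdupq_n_u64(val_i + 2));
    uint8x16_t t4 = vreinterpretq_u8_u64(vdupq_n_u64(val_i + 3));

    for (int r = 0; r < 9; r++) {   /* keep all four lanes in flight */
        t1 = vaesmcq_u8(vaeseq_u8(t1, rk[r]));
        t2 = vaesmcq_u8(vaeseq_u8(t2, rk[r]));
        t3 = vaesmcq_u8(vaeseq_u8(t3, rk[r]));
        t4 = vaesmcq_u8(vaeseq_u8(t4, rk[r]));
    }
    t1 = veorq_u8(vaeseq_u8(t1, rk[9]), rk[10]);
    t2 = veorq_u8(vaeseq_u8(t2, rk[9]), rk[10]);
    t3 = veorq_u8(vaeseq_u8(t3, rk[9]), rk[10]);
    t4 = veorq_u8(vaeseq_u8(t4, rk[9]), rk[10]);

    vst1q_u8(out,      veorq_u8(t1, vld1q_u8(in)));
    vst1q_u8(out + 16, veorq_u8(t2, vld1q_u8(in + 16)));
    vst1q_u8(out + 32, veorq_u8(t3, vld1q_u8(in + 32)));
    vst1q_u8(out + 48, veorq_u8(t4, vld1q_u8(in + 48)));
}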