43 #if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_ECHO
44 #define SPH_SMALL_FOOTPRINT_ECHO 1
52 #if !defined SPH_ECHO_64 && SPH_64_TRUE
64 #pragma warning (disable: 4146)
73 #define AES_BIG_ENDIAN 0
78 #define DECL_STATE_SMALL \
81 #define DECL_STATE_BIG \
84 #define INPUT_BLOCK_SMALL(sc) do { \
86 memcpy(W, sc->u.Vb, 8 * sizeof(sph_u64)); \
87 for (u = 0; u < 12; u ++) { \
88 W[u + 4][0] = sph_dec64le_aligned( \
90 W[u + 4][1] = sph_dec64le_aligned( \
91 sc->buf + 16 * u + 8); \
95 #define INPUT_BLOCK_BIG(sc) do { \
97 memcpy(W, sc->u.Vb, 16 * sizeof(sph_u64)); \
98 for (u = 0; u < 8; u ++) { \
99 W[u + 8][0] = sph_dec64le_aligned( \
101 W[u + 8][1] = sph_dec64le_aligned( \
102 sc->buf + 16 * u + 8); \
106 #if SPH_SMALL_FOOTPRINT_ECHO
109 aes_2rounds_all(sph_u64 W[16][2],
118 for (n = 0; n < 16; n ++) {
119 sph_u64 Wl = W[n][0];
120 sph_u64 Wh = W[n][1];
126 AES_ROUND_LE(X0, X1, X2, X3, K0, K1, K2, K3, Y0, Y1, Y2, Y3);
128 W[n][0] = (sph_u64)X0 | ((sph_u64)X1 << 32);
129 W[n][1] = (sph_u64)X2 | ((sph_u64)X3 << 32);
130 if ((K0 =
T32(K0 + 1)) == 0) {
131 if ((K1 =
T32(K1 + 1)) == 0)
132 if ((K2 =
T32(K2 + 1)) == 0)
142 #define BIG_SUB_WORDS do { \
143 aes_2rounds_all(W, &K0, &K1, &K2, &K3); \
148 #define AES_2ROUNDS(X) do { \
149 sph_u32 X0 = (sph_u32)(X[0]); \
150 sph_u32 X1 = (sph_u32)(X[0] >> 32); \
151 sph_u32 X2 = (sph_u32)(X[1]); \
152 sph_u32 X3 = (sph_u32)(X[1] >> 32); \
153 sph_u32 Y0, Y1, Y2, Y3; \
154 AES_ROUND_LE(X0, X1, X2, X3, K0, K1, K2, K3, Y0, Y1, Y2, Y3); \
155 AES_ROUND_NOKEY_LE(Y0, Y1, Y2, Y3, X0, X1, X2, X3); \
156 X[0] = (sph_u64)X0 | ((sph_u64)X1 << 32); \
157 X[1] = (sph_u64)X2 | ((sph_u64)X3 << 32); \
158 if ((K0 = T32(K0 + 1)) == 0) { \
159 if ((K1 = T32(K1 + 1)) == 0) \
160 if ((K2 = T32(K2 + 1)) == 0) \
165 #define BIG_SUB_WORDS do { \
166 AES_2ROUNDS(W[ 0]); \
167 AES_2ROUNDS(W[ 1]); \
168 AES_2ROUNDS(W[ 2]); \
169 AES_2ROUNDS(W[ 3]); \
170 AES_2ROUNDS(W[ 4]); \
171 AES_2ROUNDS(W[ 5]); \
172 AES_2ROUNDS(W[ 6]); \
173 AES_2ROUNDS(W[ 7]); \
174 AES_2ROUNDS(W[ 8]); \
175 AES_2ROUNDS(W[ 9]); \
176 AES_2ROUNDS(W[10]); \
177 AES_2ROUNDS(W[11]); \
178 AES_2ROUNDS(W[12]); \
179 AES_2ROUNDS(W[13]); \
180 AES_2ROUNDS(W[14]); \
181 AES_2ROUNDS(W[15]); \
186 #define SHIFT_ROW1(a, b, c, d) do { \
200 #define SHIFT_ROW2(a, b, c, d) do { \
216 #define SHIFT_ROW3(a, b, c, d) SHIFT_ROW1(d, c, b, a)
218 #define BIG_SHIFT_ROWS do { \
219 SHIFT_ROW1(1, 5, 9, 13); \
220 SHIFT_ROW2(2, 6, 10, 14); \
221 SHIFT_ROW3(3, 7, 11, 15); \
224 #if SPH_SMALL_FOOTPRINT_ECHO
227 mix_column(sph_u64 W[16][2],
int ia,
int ib,
int ic,
int id)
231 for (n = 0; n < 2; n ++) {
232 sph_u64 a = W[ia][n];
233 sph_u64 b = W[ib][n];
234 sph_u64 c = W[ic][n];
235 sph_u64 d = W[id][n];
239 sph_u64 abx = ((ab & C64(0x8080808080808080)) >> 7) * 27U
240 ^ ((ab & C64(0x7F7F7F7F7F7F7F7F)) << 1);
241 sph_u64 bcx = ((bc & C64(0x8080808080808080)) >> 7) * 27U
242 ^ ((bc & C64(0x7F7F7F7F7F7F7F7F)) << 1);
243 sph_u64 cdx = ((cd & C64(0x8080808080808080)) >> 7) * 27U
244 ^ ((cd & C64(0x7F7F7F7F7F7F7F7F)) << 1);
245 W[ia][n] = abx ^ bc ^ d;
246 W[ib][n] = bcx ^ a ^ cd;
247 W[ic][n] = cdx ^ ab ^ d;
248 W[id][n] = abx ^ bcx ^ cdx ^ ab ^ c;
252 #define MIX_COLUMN(a, b, c, d) mix_column(W, a, b, c, d)
256 #define MIX_COLUMN1(ia, ib, ic, id, n) do { \
257 sph_u64 a = W[ia][n]; \
258 sph_u64 b = W[ib][n]; \
259 sph_u64 c = W[ic][n]; \
260 sph_u64 d = W[id][n]; \
261 sph_u64 ab = a ^ b; \
262 sph_u64 bc = b ^ c; \
263 sph_u64 cd = c ^ d; \
264 sph_u64 abx = ((ab & C64(0x8080808080808080)) >> 7) * 27U \
265 ^ ((ab & C64(0x7F7F7F7F7F7F7F7F)) << 1); \
266 sph_u64 bcx = ((bc & C64(0x8080808080808080)) >> 7) * 27U \
267 ^ ((bc & C64(0x7F7F7F7F7F7F7F7F)) << 1); \
268 sph_u64 cdx = ((cd & C64(0x8080808080808080)) >> 7) * 27U \
269 ^ ((cd & C64(0x7F7F7F7F7F7F7F7F)) << 1); \
270 W[ia][n] = abx ^ bc ^ d; \
271 W[ib][n] = bcx ^ a ^ cd; \
272 W[ic][n] = cdx ^ ab ^ d; \
273 W[id][n] = abx ^ bcx ^ cdx ^ ab ^ c; \
276 #define MIX_COLUMN(a, b, c, d) do { \
277 MIX_COLUMN1(a, b, c, d, 0); \
278 MIX_COLUMN1(a, b, c, d, 1); \
283 #define BIG_MIX_COLUMNS do { \
284 MIX_COLUMN(0, 1, 2, 3); \
285 MIX_COLUMN(4, 5, 6, 7); \
286 MIX_COLUMN(8, 9, 10, 11); \
287 MIX_COLUMN(12, 13, 14, 15); \
290 #define BIG_ROUND do { \
296 #define FINAL_SMALL do { \
298 sph_u64 *VV = &sc->u.Vb[0][0]; \
299 sph_u64 *WW = &W[0][0]; \
300 for (u = 0; u < 8; u ++) { \
301 VV[u] ^= sph_dec64le_aligned(sc->buf + (u * 8)) \
302 ^ sph_dec64le_aligned(sc->buf + (u * 8) + 64) \
303 ^ sph_dec64le_aligned(sc->buf + (u * 8) + 128) \
304 ^ WW[u] ^ WW[u + 8] \
305 ^ WW[u + 16] ^ WW[u + 24]; \
309 #define FINAL_BIG do { \
311 sph_u64 *VV = &sc->u.Vb[0][0]; \
312 sph_u64 *WW = &W[0][0]; \
313 for (u = 0; u < 16; u ++) { \
314 VV[u] ^= sph_dec64le_aligned(sc->buf + (u * 8)) \
315 ^ WW[u] ^ WW[u + 16]; \
319 #define COMPRESS_SMALL(sc) do { \
320 sph_u32 K0 = sc->C0; \
321 sph_u32 K1 = sc->C1; \
322 sph_u32 K2 = sc->C2; \
323 sph_u32 K3 = sc->C3; \
325 INPUT_BLOCK_SMALL(sc); \
326 for (u = 0; u < 8; u ++) { \
332 #define COMPRESS_BIG(sc) do { \
333 sph_u32 K0 = sc->C0; \
334 sph_u32 K1 = sc->C1; \
335 sph_u32 K2 = sc->C2; \
336 sph_u32 K3 = sc->C3; \
338 INPUT_BLOCK_BIG(sc); \
339 for (u = 0; u < 10; u ++) { \
347 #define DECL_STATE_SMALL \
350 #define DECL_STATE_BIG \
353 #define INPUT_BLOCK_SMALL(sc) do { \
355 memcpy(W, sc->u.Vs, 16 * sizeof(sph_u32)); \
356 for (u = 0; u < 12; u ++) { \
357 W[u + 4][0] = sph_dec32le_aligned( \
359 W[u + 4][1] = sph_dec32le_aligned( \
360 sc->buf + 16 * u + 4); \
361 W[u + 4][2] = sph_dec32le_aligned( \
362 sc->buf + 16 * u + 8); \
363 W[u + 4][3] = sph_dec32le_aligned( \
364 sc->buf + 16 * u + 12); \
368 #define INPUT_BLOCK_BIG(sc) do { \
370 memcpy(W, sc->u.Vs, 32 * sizeof(sph_u32)); \
371 for (u = 0; u < 8; u ++) { \
372 W[u + 8][0] = sph_dec32le_aligned( \
374 W[u + 8][1] = sph_dec32le_aligned( \
375 sc->buf + 16 * u + 4); \
376 W[u + 8][2] = sph_dec32le_aligned( \
377 sc->buf + 16 * u + 8); \
378 W[u + 8][3] = sph_dec32le_aligned( \
379 sc->buf + 16 * u + 12); \
383 #if SPH_SMALL_FOOTPRINT_ECHO
386 aes_2rounds_all(
sph_u32 W[16][4],
395 for (n = 0; n < 16; n ++) {
399 K0, K1, K2, K3, Y0, Y1, Y2, Y3);
401 if ((K0 =
T32(K0 + 1)) == 0) {
402 if ((K1 =
T32(K1 + 1)) == 0)
403 if ((K2 =
T32(K2 + 1)) == 0)
413 #define BIG_SUB_WORDS do { \
414 aes_2rounds_all(W, &K0, &K1, &K2, &K3); \
419 #define AES_2ROUNDS(X) do { \
420 sph_u32 Y0, Y1, Y2, Y3; \
421 AES_ROUND_LE(X[0], X[1], X[2], X[3], \
422 K0, K1, K2, K3, Y0, Y1, Y2, Y3); \
423 AES_ROUND_NOKEY_LE(Y0, Y1, Y2, Y3, X[0], X[1], X[2], X[3]); \
424 if ((K0 = T32(K0 + 1)) == 0) { \
425 if ((K1 = T32(K1 + 1)) == 0) \
426 if ((K2 = T32(K2 + 1)) == 0) \
431 #define BIG_SUB_WORDS do { \
432 AES_2ROUNDS(W[ 0]); \
433 AES_2ROUNDS(W[ 1]); \
434 AES_2ROUNDS(W[ 2]); \
435 AES_2ROUNDS(W[ 3]); \
436 AES_2ROUNDS(W[ 4]); \
437 AES_2ROUNDS(W[ 5]); \
438 AES_2ROUNDS(W[ 6]); \
439 AES_2ROUNDS(W[ 7]); \
440 AES_2ROUNDS(W[ 8]); \
441 AES_2ROUNDS(W[ 9]); \
442 AES_2ROUNDS(W[10]); \
443 AES_2ROUNDS(W[11]); \
444 AES_2ROUNDS(W[12]); \
445 AES_2ROUNDS(W[13]); \
446 AES_2ROUNDS(W[14]); \
447 AES_2ROUNDS(W[15]); \
452 #define SHIFT_ROW1(a, b, c, d) do { \
476 #define SHIFT_ROW2(a, b, c, d) do { \
504 #define SHIFT_ROW3(a, b, c, d) SHIFT_ROW1(d, c, b, a)
506 #define BIG_SHIFT_ROWS do { \
507 SHIFT_ROW1(1, 5, 9, 13); \
508 SHIFT_ROW2(2, 6, 10, 14); \
509 SHIFT_ROW3(3, 7, 11, 15); \
512 #if SPH_SMALL_FOOTPRINT_ECHO
515 mix_column(
sph_u32 W[16][4],
int ia,
int ib,
int ic,
int id)
519 for (n = 0; n < 4; n ++) {
527 sph_u32 abx = ((ab &
C32(0x80808080)) >> 7) * 27U
528 ^ ((ab &
C32(0x7F7F7F7F)) << 1);
529 sph_u32 bcx = ((bc &
C32(0x80808080)) >> 7) * 27U
530 ^ ((bc &
C32(0x7F7F7F7F)) << 1);
531 sph_u32 cdx = ((cd &
C32(0x80808080)) >> 7) * 27U
532 ^ ((cd &
C32(0x7F7F7F7F)) << 1);
533 W[ia][n] = abx ^ bc ^ d;
534 W[ib][n] = bcx ^ a ^ cd;
535 W[ic][n] = cdx ^ ab ^ d;
536 W[id][n] = abx ^ bcx ^ cdx ^ ab ^ c;
540 #define MIX_COLUMN(a, b, c, d) mix_column(W, a, b, c, d)
544 #define MIX_COLUMN1(ia, ib, ic, id, n) do { \
545 sph_u32 a = W[ia][n]; \
546 sph_u32 b = W[ib][n]; \
547 sph_u32 c = W[ic][n]; \
548 sph_u32 d = W[id][n]; \
549 sph_u32 ab = a ^ b; \
550 sph_u32 bc = b ^ c; \
551 sph_u32 cd = c ^ d; \
552 sph_u32 abx = ((ab & C32(0x80808080)) >> 7) * 27U \
553 ^ ((ab & C32(0x7F7F7F7F)) << 1); \
554 sph_u32 bcx = ((bc & C32(0x80808080)) >> 7) * 27U \
555 ^ ((bc & C32(0x7F7F7F7F)) << 1); \
556 sph_u32 cdx = ((cd & C32(0x80808080)) >> 7) * 27U \
557 ^ ((cd & C32(0x7F7F7F7F)) << 1); \
558 W[ia][n] = abx ^ bc ^ d; \
559 W[ib][n] = bcx ^ a ^ cd; \
560 W[ic][n] = cdx ^ ab ^ d; \
561 W[id][n] = abx ^ bcx ^ cdx ^ ab ^ c; \
564 #define MIX_COLUMN(a, b, c, d) do { \
565 MIX_COLUMN1(a, b, c, d, 0); \
566 MIX_COLUMN1(a, b, c, d, 1); \
567 MIX_COLUMN1(a, b, c, d, 2); \
568 MIX_COLUMN1(a, b, c, d, 3); \
573 #define BIG_MIX_COLUMNS do { \
574 MIX_COLUMN(0, 1, 2, 3); \
575 MIX_COLUMN(4, 5, 6, 7); \
576 MIX_COLUMN(8, 9, 10, 11); \
577 MIX_COLUMN(12, 13, 14, 15); \
580 #define BIG_ROUND do { \
586 #define FINAL_SMALL do { \
588 sph_u32 *VV = &sc->u.Vs[0][0]; \
589 sph_u32 *WW = &W[0][0]; \
590 for (u = 0; u < 16; u ++) { \
591 VV[u] ^= sph_dec32le_aligned(sc->buf + (u * 4)) \
592 ^ sph_dec32le_aligned(sc->buf + (u * 4) + 64) \
593 ^ sph_dec32le_aligned(sc->buf + (u * 4) + 128) \
594 ^ WW[u] ^ WW[u + 16] \
595 ^ WW[u + 32] ^ WW[u + 48]; \
599 #define FINAL_BIG do { \
601 sph_u32 *VV = &sc->u.Vs[0][0]; \
602 sph_u32 *WW = &W[0][0]; \
603 for (u = 0; u < 32; u ++) { \
604 VV[u] ^= sph_dec32le_aligned(sc->buf + (u * 4)) \
605 ^ WW[u] ^ WW[u + 32]; \
609 #define COMPRESS_SMALL(sc) do { \
610 sph_u32 K0 = sc->C0; \
611 sph_u32 K1 = sc->C1; \
612 sph_u32 K2 = sc->C2; \
613 sph_u32 K3 = sc->C3; \
615 INPUT_BLOCK_SMALL(sc); \
616 for (u = 0; u < 8; u ++) { \
622 #define COMPRESS_BIG(sc) do { \
623 sph_u32 K0 = sc->C0; \
624 sph_u32 K1 = sc->C1; \
625 sph_u32 K2 = sc->C2; \
626 sph_u32 K3 = sc->C3; \
628 INPUT_BLOCK_BIG(sc); \
629 for (u = 0; u < 10; u ++) { \
637 #define INCR_COUNTER(sc, val) do { \
638 sc->C0 = T32(sc->C0 + (sph_u32)(val)); \
639 if (sc->C0 < (sph_u32)(val)) { \
640 if ((sc->C1 = T32(sc->C1 + 1)) == 0) \
641 if ((sc->C2 = T32(sc->C2 + 1)) == 0) \
642 sc->C3 = T32(sc->C3 + 1); \
650 sc->
u.Vb[0][0] = (sph_u64)out_len;
652 sc->
u.Vb[1][0] = (sph_u64)out_len;
654 sc->
u.Vb[2][0] = (sph_u64)out_len;
656 sc->
u.Vb[3][0] = (sph_u64)out_len;
660 sc->
u.
Vs[0][1] = sc->
u.
Vs[0][2] = sc->
u.
Vs[0][3] = 0;
662 sc->
u.
Vs[1][1] = sc->
u.
Vs[1][2] = sc->
u.
Vs[1][3] = 0;
664 sc->
u.
Vs[2][1] = sc->
u.
Vs[2][2] = sc->
u.
Vs[2][3] = 0;
666 sc->
u.
Vs[3][1] = sc->
u.
Vs[3][2] = sc->
u.
Vs[3][3] = 0;
669 sc->
C0 = sc->
C1 = sc->
C2 = sc->
C3 = 0;
676 sc->
u.Vb[0][0] = (sph_u64)out_len;
678 sc->
u.Vb[1][0] = (sph_u64)out_len;
680 sc->
u.Vb[2][0] = (sph_u64)out_len;
682 sc->
u.Vb[3][0] = (sph_u64)out_len;
684 sc->
u.Vb[4][0] = (sph_u64)out_len;
686 sc->
u.Vb[5][0] = (sph_u64)out_len;
688 sc->
u.Vb[6][0] = (sph_u64)out_len;
690 sc->
u.Vb[7][0] = (sph_u64)out_len;
694 sc->
u.
Vs[0][1] = sc->
u.
Vs[0][2] = sc->
u.
Vs[0][3] = 0;
696 sc->
u.
Vs[1][1] = sc->
u.
Vs[1][2] = sc->
u.
Vs[1][3] = 0;
698 sc->
u.
Vs[2][1] = sc->
u.
Vs[2][2] = sc->
u.
Vs[2][3] = 0;
700 sc->
u.
Vs[3][1] = sc->
u.
Vs[3][2] = sc->
u.
Vs[3][3] = 0;
702 sc->
u.
Vs[4][1] = sc->
u.
Vs[4][2] = sc->
u.
Vs[4][3] = 0;
704 sc->
u.
Vs[5][1] = sc->
u.
Vs[5][2] = sc->
u.
Vs[5][3] = 0;
706 sc->
u.
Vs[6][1] = sc->
u.
Vs[6][2] = sc->
u.
Vs[6][3] = 0;
708 sc->
u.
Vs[7][1] = sc->
u.
Vs[7][2] = sc->
u.
Vs[7][3] = 0;
711 sc->
C0 = sc->
C1 = sc->
C2 = sc->
C3 = 0;
732 const unsigned char *data,
size_t len)
739 if (len < (
sizeof sc->
buf) - ptr) {
740 memcpy(buf + ptr, data, len);
749 clen = (
sizeof sc->
buf) - ptr;
752 memcpy(buf + ptr, data, clen);
756 if (ptr ==
sizeof sc->
buf) {
758 echo_small_compress(sc);
767 const unsigned char *data,
size_t len)
774 if (len < (
sizeof sc->
buf) - ptr) {
775 memcpy(buf + ptr, data, len);
784 clen = (
sizeof sc->
buf) - ptr;
787 memcpy(buf + ptr, data, clen);
791 if (ptr ==
sizeof sc->
buf) {
793 echo_big_compress(sc);
802 void *dst,
unsigned out_size_w32)
809 unsigned char tmp[32];
824 elen = ((unsigned)ptr << 3) + n;
826 sph_enc32le_aligned(u.tmp, sc->
C0);
827 sph_enc32le_aligned(u.tmp + 4, sc->
C1);
828 sph_enc32le_aligned(u.tmp + 8, sc->
C2);
829 sph_enc32le_aligned(u.tmp + 12, sc->
C3);
835 sc->
C0 = sc->
C1 = sc->
C2 = sc->
C3 = 0;
838 buf[ptr ++] = ((ub & -z) | z) & 0xFF;
839 memset(buf + ptr, 0, (
sizeof sc->
buf) - ptr);
840 if (ptr > ((
sizeof sc->
buf) - 18)) {
841 echo_small_compress(sc);
842 sc->
C0 = sc->
C1 = sc->
C2 = sc->
C3 = 0;
843 memset(buf, 0,
sizeof sc->
buf);
845 sph_enc16le(buf + (
sizeof sc->
buf) - 18, out_size_w32 << 5);
846 memcpy(buf + (
sizeof sc->
buf) - 16, u.tmp, 16);
847 echo_small_compress(sc);
849 for (VV = &sc->
u.Vb[0][0], k = 0; k < ((out_size_w32 + 1) >> 1); k ++)
850 sph_enc64le_aligned(u.tmp + (k << 3), VV[k]);
852 for (VV = &sc->
u.
Vs[0][0], k = 0; k < out_size_w32; k ++)
853 sph_enc32le_aligned(u.tmp + (k << 2), VV[k]);
855 memcpy(dst, u.tmp, out_size_w32 << 2);
856 echo_small_init(sc, out_size_w32 << 5);
861 void *dst,
unsigned out_size_w32)
868 unsigned char tmp[64];
883 elen = ((unsigned)ptr << 3) + n;
885 sph_enc32le_aligned(u.tmp, sc->
C0);
886 sph_enc32le_aligned(u.tmp + 4, sc->
C1);
887 sph_enc32le_aligned(u.tmp + 8, sc->
C2);
888 sph_enc32le_aligned(u.tmp + 12, sc->
C3);
894 sc->
C0 = sc->
C1 = sc->
C2 = sc->
C3 = 0;
897 buf[ptr ++] = ((ub & -z) | z) & 0xFF;
898 memset(buf + ptr, 0, (
sizeof sc->
buf) - ptr);
899 if (ptr > ((
sizeof sc->
buf) - 18)) {
900 echo_big_compress(sc);
901 sc->
C0 = sc->
C1 = sc->
C2 = sc->
C3 = 0;
902 memset(buf, 0,
sizeof sc->
buf);
904 sph_enc16le(buf + (
sizeof sc->
buf) - 18, out_size_w32 << 5);
905 memcpy(buf + (
sizeof sc->
buf) - 16, u.tmp, 16);
906 echo_big_compress(sc);
908 for (VV = &sc->
u.Vb[0][0], k = 0; k < ((out_size_w32 + 1) >> 1); k ++)
909 sph_enc64le_aligned(u.tmp + (k << 3), VV[k]);
911 for (VV = &sc->
u.
Vs[0][0], k = 0; k < out_size_w32; k ++)
912 sph_enc32le_aligned(u.tmp + (k << 2), VV[k]);
914 memcpy(dst, u.tmp, out_size_w32 << 2);
915 echo_big_init(sc, out_size_w32 << 5);
922 echo_small_init(cc, 224);
929 echo_small_core(cc, data, len);
936 echo_small_close(cc, 0, 0, dst, 7);
943 echo_small_close(cc, ub, n, dst, 7);
950 echo_small_init(cc, 256);
957 echo_small_core(cc, data, len);
964 echo_small_close(cc, 0, 0, dst, 8);
971 echo_small_close(cc, ub, n, dst, 8);
978 echo_big_init(cc, 384);
985 echo_big_core(cc, data, len);
992 echo_big_close(cc, 0, 0, dst, 12);
999 echo_big_close(cc, ub, n, dst, 12);
1006 echo_big_init(cc, 512);
1013 echo_big_core(cc, data, len);
1020 echo_big_close(cc, 0, 0, dst, 16);
1027 echo_big_close(cc, ub, n, dst, 16);
#define AES_ROUND_LE(X0, X1, X2, X3, K0, K1, K2, K3, Y0, Y1, Y2, Y3)
#define AES_ROUND_NOKEY_LE(X0, X1, X2, X3, Y0, Y1, Y2, Y3)
void sph_echo384_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
Add a few additional bits (0 to 7) to the current computation, then terminate it and output the result into the provided buffer.
#define INCR_COUNTER(sc, val)
void sph_echo384_close(void *cc, void *dst)
Terminate the current ECHO-384 computation and output the result into the provided buffer.
void sph_echo512(void *cc, const void *data, size_t len)
Process some data bytes.
void sph_echo224_close(void *cc, void *dst)
Terminate the current ECHO-224 computation and output the result into the provided buffer.
void sph_echo224(void *cc, const void *data, size_t len)
Process some data bytes.
void sph_echo256_init(void *cc)
Initialize an ECHO-256 context.
void sph_echo256_close(void *cc, void *dst)
Terminate the current ECHO-256 computation and output the result into the provided buffer.
void sph_echo224_init(void *cc)
Initialize an ECHO-224 context.
void sph_echo512_close(void *cc, void *dst)
Terminate the current ECHO-512 computation and output the result into the provided buffer.
void sph_echo384(void *cc, const void *data, size_t len)
Process some data bytes.
void sph_echo224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
Add a few additional bits (0 to 7) to the current computation, then terminate it and output the result into the provided buffer.
void sph_echo512_init(void *cc)
Initialize an ECHO-512 context.
void sph_echo512_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
Add a few additional bits (0 to 7) to the current computation, then terminate it and output the result into the provided buffer.
void sph_echo256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
Add a few additional bits (0 to 7) to the current computation, then terminate it and output the result into the provided buffer.
void sph_echo384_init(void *cc)
Initialize an ECHO-384 context.
#define COMPRESS_SMALL(sc)
void sph_echo256(void *cc, const void *data, size_t len)
Process some data bytes.
void * memcpy(void *a, const void *b, size_t c)
This structure is a context for ECHO computations: it contains the intermediate values and some data ...
union sph_echo_big_context::@4 u
This structure is a context for ECHO computations: it contains the intermediate values and some data ...
union sph_echo_small_context::@3 u