/*
 * GF-Complete: A Comprehensive Open Source Library for Galois Field Arithmetic
 * James S. Plank, Ethan L. Miller, Kevin M. Greenan,
 * Benjamin A. Arnold, John A. Burnum, Adam W. Disney, Allen C. McBride.
 *
 * Routines for 4-bit Galois fields
 */
#define AB2(ip, am1, am2, b, t1, t2) {\
  t1 = (b << 1) & am1; \
  t2 = b & am2; \
  t2 = ((t2 << 1) - (t2 >> (GF_FIELD_WIDTH-1))); \
  b = (t1 ^ (t2 & ip));}
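/* Note (added): AB2 multiplies every 4-bit value packed into b by two at
   once.  t1 is the shifted word with cross-nibble spill masked off by am1;
   t2 isolates the high bit of each nibble via am2, and
   (t2 << 1) - (t2 >> 3) smears each such bit into a full 0xF nibble mask
   (e.g. t2 = 0x80 gives 0x100 - 0x10 = 0xF0), so (t2 & ip) applies the
   primitive polynomial to exactly the nibbles that overflowed. */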
// TODO(KMG/JSP): Why is 0x88 hard-coded?
#define SSE_AB2(pp, m1, va, t1, t2) {\
  t1 = _mm_and_si128(_mm_slli_epi64(va, 1), m1); \
  t2 = _mm_and_si128(va, _mm_set1_epi8(0x88)); \
  t2 = _mm_sub_epi64 (_mm_slli_epi64(t2, 1), _mm_srli_epi64(t2, (GF_FIELD_WIDTH-1))); \
  va = _mm_xor_si128(t1, _mm_and_si128(t2, pp)); }
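/* Note (added): 0x88 selects the high bit of each 4-bit nibble in every
   byte, i.e. it plays the role of mask2 (am2) for w = 4; it could
   presumably be passed in as a parameter like m1 instead of being
   hard-coded. */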
/* ------------------------------------------------------------
   JSP: These are basic routines that work across multiple implementations.
 */
static
gf_val_32_t gf_w4_inverse_from_divide (gf_t *gf, gf_val_32_t a)
{
  return gf->divide.w32(gf, 1, a);
}
static
gf_val_32_t gf_w4_divide_from_inverse (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  b = gf->inverse.w32(gf, b);
  return gf->multiply.w32(gf, a, b);
}
static
gf_val_32_t gf_w4_euclid (gf_t *gf, gf_val_32_t b)
{
  gf_val_32_t e_i, e_im1, e_ip1;
  gf_val_32_t d_i, d_im1, d_ip1;
  gf_val_32_t y_i, y_im1, y_ip1;
  gf_val_32_t c_i;

  if (b == 0) return -1;
  e_im1 = ((gf_internal_t *) (gf->scratch))->prim_poly;
  e_i = b;
  d_im1 = 4;
  for (d_i = d_im1; ((1 << d_i) & e_i) == 0; d_i--) ;
  y_i = 1;
  y_im1 = 0;

  while (e_i != 1) {
    e_ip1 = e_im1;
    d_ip1 = d_im1;
    c_i = 0;

    while (d_ip1 >= d_i) {
      c_i ^= (1 << (d_ip1 - d_i));
      e_ip1 ^= (e_i << (d_ip1 - d_i));
      if (e_ip1 == 0) return 0;
      while ((e_ip1 & (1 << d_ip1)) == 0) d_ip1--;
    }

    y_ip1 = y_im1 ^ gf->multiply.w32(gf, c_i, y_i);
    y_im1 = y_i;
    y_i = y_ip1;

    e_im1 = e_i;
    d_im1 = d_i;
    e_i = e_ip1;
    d_i = d_ip1;
  }

  return y_i;
}
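/* Worked example (added): in GF(2^4) with prim_poly 0x13 (x^4 + x + 1),
   gf_w4_euclid(gf, 2) returns 9: 2 * 9 is the carry-free product 0x12,
   and 0x12 ^ 0x13 = 1. */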
static
gf_val_32_t gf_w4_extract_word(gf_t *gf, void *start, int bytes, int index)
{
  uint8_t *r8;

  r8 = (uint8_t *) start;
  return (index & 1) ? (r8[index/2] >> 4) : (r8[index/2] & 0xf);
}
static
gf_val_32_t gf_w4_matrix (gf_t *gf, gf_val_32_t b)
{
  return gf_bitmatrix_inverse(b, 4, ((gf_internal_t *) (gf->scratch))->prim_poly);
}
static
gf_val_32_t
gf_w4_shift_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  uint8_t product, i, pp;
  gf_internal_t *h;

  h = (gf_internal_t *) gf->scratch;
  pp = h->prim_poly;

  product = 0;

  for (i = 0; i < GF_FIELD_WIDTH; i++) {
    if (a & (1 << i)) product ^= (b << i);
  }
  for (i = (GF_FIELD_WIDTH*2-2); i >= GF_FIELD_WIDTH; i--) {
    if (product & (1 << i)) product ^= (pp << (i-GF_FIELD_WIDTH));
  }
  return product;
}
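/* Worked example (added): with prim_poly 0x13, take a = 7, b = 10.  The
   carry-free product is 10 ^ (10 << 1) ^ (10 << 2) = 0x36.  Reduction
   folds bit 5 in with (0x13 << 1), leaving 0x10, then bit 4 with 0x13,
   leaving 3.  So 7 * 10 = 3 in GF(16). */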
/* Ben: This function works, but it is 33% slower than the normal shift mult */

#if defined(INTEL_SSE4_PCLMUL)
static
gf_val_32_t
gf_w4_clm_multiply (gf_t *gf, gf_val_32_t a4, gf_val_32_t b4)
{
  gf_val_32_t rv = 0;

  __m128i a, b;
  __m128i result;
  __m128i prim_poly;
  __m128i w;

  gf_internal_t * h = gf->scratch;

  a = _mm_insert_epi32 (_mm_setzero_si128(), a4, 0);
  b = _mm_insert_epi32 (a, b4, 0);

  prim_poly = _mm_set_epi32(0, 0, 0, (uint32_t)(h->prim_poly & 0x1fULL));

  /* Do the initial multiply */

  result = _mm_clmulepi64_si128 (a, b, 0);
  /* Ben/JSP: Do the prim_poly reduction once.  We are guaranteed to need
     the reduction only once, because (w-2)/z == 1, where z is the number
     of zeros after the leading 1.

     _mm_clmulepi64_si128 is the carryless multiply operation.  Here
     _mm_srli_epi64 shifts the result right by 4 bits, which lets us
     multiply the prim_poly by the leading bits of the result.  We then
     xor that product back into the result. */

  w = _mm_clmulepi64_si128 (prim_poly, _mm_srli_epi64 (result, 4), 0);
  result = _mm_xor_si128 (result, w);

  /* Extract the 32-bit value from the result. */

  rv = ((gf_val_32_t)_mm_extract_epi32(result, 0));
  return rv;
}
#endif
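/* Note (added): for w = 4 the polynomial 0x13 is 10011 in binary, so
   z = 2 zeros follow the leading 1 and (w-2)/z = 2/2 = 1.  Concretely,
   the 7-bit carryless product has at most three overflow bits (4..6);
   multiplying them by the polynomial cancels those bits and only feeds
   back into bits 0..3, so one reduction suffices. */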
static
void
gf_w4_multiply_region_from_single(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  gf_region_data rd;
  uint8_t *s8;
  uint8_t *d8;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 1);
  gf_do_initial_region_alignment(&rd);

  s8 = (uint8_t *) rd.s_start;
  d8 = (uint8_t *) rd.d_start;

  if (xor) {
    while (d8 < ((uint8_t *) rd.d_top)) {
      *d8 ^= (gf->multiply.w32(gf, val, (*s8 & 0xf)) |
              ((gf->multiply.w32(gf, val, (*s8 >> 4))) << 4));
      d8++;
      s8++;
    }
  } else {
    while (d8 < ((uint8_t *) rd.d_top)) {
      *d8 = (gf->multiply.w32(gf, val, (*s8 & 0xf)) |
             ((gf->multiply.w32(gf, val, (*s8 >> 4))) << 4));
      d8++;
      s8++;
    }
  }
  gf_do_final_region_alignment(&rd);
}
/* ------------------------------------------------------------
   IMPLEMENTATION: LOG_TABLE:

   JSP: This is a basic log-antilog implementation.
   I'm not going to spend any time optimizing it because the
   other techniques are faster for both single and region
   operations.
 */
static
gf_val_32_t
gf_w4_log_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_logtable_data *ltd;

  ltd = (struct gf_logtable_data *) ((gf_internal_t *) (gf->scratch))->private;
  return (a == 0 || b == 0) ? 0 : ltd->antilog_tbl[(unsigned)(ltd->log_tbl[a] + ltd->log_tbl[b])];
}
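/* Worked example (added): with prim_poly 0x13, log_tbl[7] = 10 and
   log_tbl[10] = 9, so the product is antilog_tbl[19].  The antilog table
   is written twice (see gf_w4_log_init), so entry 19 equals
   antilog(19 mod 15) = antilog(4) = 3 without a modulo in the fast path. */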
static
gf_val_32_t
gf_w4_log_divide (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  int log_sum = 0;
  struct gf_logtable_data *ltd;

  if (a == 0 || b == 0) return 0;
  ltd = (struct gf_logtable_data *) ((gf_internal_t *) (gf->scratch))->private;

  log_sum = ltd->log_tbl[a] - ltd->log_tbl[b];
  return (ltd->antilog_tbl_div[log_sum]);
}
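/* Note (added): log_sum can be negative (as low as -(GF_FIELD_SIZE-2)),
   which is why antilog_tbl_div points GF_FIELD_SIZE-1 entries into the
   doubled antilog table: negative indices still land on valid entries. */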
static
void
gf_w4_log_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  int i;
  uint8_t lv, b, c;
  uint8_t *s8, *d8;
  struct gf_logtable_data *ltd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  ltd = (struct gf_logtable_data *) ((gf_internal_t *) (gf->scratch))->private;
  s8 = (uint8_t *) src;
  d8 = (uint8_t *) dest;

  lv = ltd->log_tbl[val];

  for (i = 0; i < bytes; i++) {
    c = (xor) ? d8[i] : 0;
    b = (s8[i] >> GF_FIELD_WIDTH);
    c ^= (b == 0) ? 0 : (ltd->antilog_tbl[lv + ltd->log_tbl[b]] << GF_FIELD_WIDTH);
    b = (s8[i] & 0xf);
    c ^= (b == 0) ? 0 : ltd->antilog_tbl[lv + ltd->log_tbl[b]];
    d8[i] = c;
  }
}
static
int gf_w4_log_init(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_logtable_data *ltd;
  int i, b;

  h = (gf_internal_t *) gf->scratch;
  ltd = h->private;

  for (i = 0; i < GF_FIELD_SIZE; i++)
    ltd->log_tbl[i] = 0;

  ltd->antilog_tbl_div = ltd->antilog_tbl + (GF_FIELD_SIZE-1);

  b = 1;
  i = 0;
  do {
    if (ltd->log_tbl[b] != 0 && i != 0) {
      fprintf(stderr, "Cannot construct log table: Polynomial is not primitive.\n\n");
      return 0;
    }
    ltd->log_tbl[b] = i;
    ltd->antilog_tbl[i] = b;
    ltd->antilog_tbl[i+GF_FIELD_SIZE-1] = b;
    b <<= 1;
    if (b & GF_FIELD_SIZE) b = b ^ h->prim_poly;
    i++;
  } while (b != 1);

  if (i != GF_FIELD_SIZE - 1) {
    _gf_errno = GF_E_LOGPOLY;
    return 0;
  }

  SET_FUNCTION(gf,inverse,w32,gf_w4_inverse_from_divide)
  SET_FUNCTION(gf,divide,w32,gf_w4_log_divide)
  SET_FUNCTION(gf,multiply,w32,gf_w4_log_multiply)
  SET_FUNCTION(gf,multiply_region,w32,gf_w4_log_multiply_region)
  return 1;
}
/* ------------------------------------------------------------
   IMPLEMENTATION: SINGLE TABLE: JSP.
 */
static
gf_val_32_t
gf_w4_single_table_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_single_table_data *std;

  std = (struct gf_single_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->mult[a][b];
}
static
gf_val_32_t
gf_w4_single_table_divide (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_single_table_data *std;

  std = (struct gf_single_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->div[a][b];
}
static
void
gf_w4_single_table_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  int i;
  uint8_t b, c;
  uint8_t *s8, *d8;
  struct gf_single_table_data *std;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  std = (struct gf_single_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  s8 = (uint8_t *) src;
  d8 = (uint8_t *) dest;

  for (i = 0; i < bytes; i++) {
    c = (xor) ? d8[i] : 0;
    b = (s8[i] >> GF_FIELD_WIDTH);
    c ^= (std->mult[val][b] << GF_FIELD_WIDTH);
    b = (s8[i] & 0xf);
    c ^= (std->mult[val][b]);
    d8[i] = c;
  }
}
#define MM_PRINT(s, r) { uint8_t blah[16]; printf("%-12s", s); _mm_storeu_si128((__m128i *)blah, r); for (i = 0; i < 16; i++) printf(" %02x", blah[i]); printf("\n"); }
static
void
gf_w4_single_table_sse_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  gf_region_data rd;
  uint8_t *base, *sptr, *dptr, *top;
  __m128i tl, loset, r, va, th;

  struct gf_single_table_data *std;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 16);

  std = (struct gf_single_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  base = (uint8_t *) std->mult;
  base += (val << GF_FIELD_WIDTH);

  gf_do_initial_region_alignment(&rd);

  tl = _mm_loadu_si128((__m128i *)base);
  th = _mm_slli_epi64(tl, 4);
  loset = _mm_set1_epi8 (0x0f);

  sptr = rd.s_start;
  dptr = rd.d_start;
  top = rd.s_top;

  while (sptr < (uint8_t *) top) {
    va = _mm_load_si128 ((__m128i *)(sptr));
    r = _mm_and_si128 (loset, va);
    r = _mm_shuffle_epi8 (tl, r);
    va = _mm_srli_epi64 (va, 4);
    va = _mm_and_si128 (loset, va);
    va = _mm_shuffle_epi8 (th, va);
    r = _mm_xor_si128 (r, va);
    va = (xor) ? _mm_load_si128 ((__m128i *)(dptr)) : _mm_setzero_si128();
    r = _mm_xor_si128 (r, va);
    _mm_store_si128 ((__m128i *)(dptr), r);
    dptr += 16;
    sptr += 16;
  }

  gf_do_final_region_alignment(&rd);
}
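/* Note (added): this is the standard SSSE3 nibble-lookup trick.  tl holds
   the 16-entry product table for val, one product per byte;
   _mm_shuffle_epi8 then uses each low nibble of va as an index into that
   table, performing 16 lookups in one instruction.  th is the same table
   pre-shifted into the high nibble of each byte, so the high nibbles of
   the source are handled by a second shuffle and xored in. */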
static
int gf_w4_single_table_init(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_single_table_data *std;
  int a, b, prod;

  h = (gf_internal_t *) gf->scratch;
  std = (struct gf_single_table_data *)h->private;

  bzero(std->mult, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);
  bzero(std->div, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);

  for (a = 1; a < GF_FIELD_SIZE; a++) {
    for (b = 1; b < GF_FIELD_SIZE; b++) {
      prod = gf_w4_shift_multiply(gf, a, b);
      std->mult[a][b] = prod;
      std->div[prod][b] = a;
    }
  }

  SET_FUNCTION(gf,inverse,w32,NULL)
  SET_FUNCTION(gf,divide,w32,gf_w4_single_table_divide)
  SET_FUNCTION(gf,multiply,w32,gf_w4_single_table_multiply)
#if defined(INTEL_SSSE3)
  if (gf_cpu_supports_intel_ssse3 && !(h->region_type & (GF_REGION_NOSIMD | GF_REGION_CAUCHY))) {
    SET_FUNCTION(gf,multiply_region,w32,gf_w4_single_table_sse_multiply_region)
  } else {
#elif defined(ARM_NEON)
  if (gf_cpu_supports_arm_neon && !(h->region_type & (GF_REGION_NOSIMD | GF_REGION_CAUCHY))) {
    gf_w4_neon_single_table_init(gf);
  } else {
#endif
    SET_FUNCTION(gf,multiply_region,w32,gf_w4_single_table_multiply_region)
    if (h->region_type & GF_REGION_SIMD) return 0;
#if defined(INTEL_SSSE3) || defined(ARM_NEON)
  }
#endif

  return 1;
}
/* ------------------------------------------------------------
   IMPLEMENTATION: DOUBLE TABLE: JSP.
 */
static
gf_val_32_t
gf_w4_double_table_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_double_table_data *std;

  std = (struct gf_double_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->mult[a][b];
}
static
gf_val_32_t
gf_w4_double_table_divide (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_double_table_data *std;

  std = (struct gf_double_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->div[a][b];
}
static
void
gf_w4_double_table_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  int i;
  uint8_t *s8, *d8, *base;
  gf_region_data rd;
  struct gf_double_table_data *std;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 8);

  std = (struct gf_double_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  s8 = (uint8_t *) src;
  d8 = (uint8_t *) dest;
  base = (uint8_t *) std->mult;
  base += (val << GF_DOUBLE_WIDTH);

  if (xor) {
    for (i = 0; i < bytes; i++) d8[i] ^= base[s8[i]];
  } else {
    for (i = 0; i < bytes; i++) d8[i] = base[s8[i]];
  }
}
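/* Note (added): std->mult is indexed here by a whole byte, i.e. two 4-bit
   symbols at a time: row val (selected by val << GF_DOUBLE_WIDTH, with
   GF_DOUBLE_WIDTH presumably 8) is a 256-entry table mapping each source
   byte directly to the byte holding both products. */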
static
int gf_w4_double_table_init(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_double_table_data *std;
  int a, b, c, prod, ab;
  uint8_t mult[GF_FIELD_SIZE][GF_FIELD_SIZE];

  h = (gf_internal_t *) gf->scratch;
  std = (struct gf_double_table_data *)h->private;

  bzero(mult, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);
  bzero(std->div, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);

  for (a = 1; a < GF_FIELD_SIZE; a++) {
    for (b = 1; b < GF_FIELD_SIZE; b++) {
      prod = gf_w4_shift_multiply(gf, a, b);
      mult[a][b] = prod;
      std->div[prod][b] = a;
    }
  }

  bzero(std->mult, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE * GF_FIELD_SIZE);
  for (a = 0; a < GF_FIELD_SIZE; a++) {
    for (b = 0; b < GF_FIELD_SIZE; b++) {
      ab = mult[a][b];
      for (c = 0; c < GF_FIELD_SIZE; c++) {
        std->mult[a][(b << 4) | c] = ((ab << 4) | mult[a][c]);
      }
    }
  }

  SET_FUNCTION(gf,inverse,w32,NULL)
  SET_FUNCTION(gf,divide,w32,gf_w4_double_table_divide)
  SET_FUNCTION(gf,multiply,w32,gf_w4_double_table_multiply)
  SET_FUNCTION(gf,multiply_region,w32,gf_w4_double_table_multiply_region)
  return 1;
}
static
gf_val_32_t
gf_w4_quad_table_lazy_divide (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_quad_table_lazy_data *std;

  std = (struct gf_quad_table_lazy_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->div[a][b];
}
static
gf_val_32_t
gf_w4_quad_table_lazy_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_quad_table_lazy_data *std;

  std = (struct gf_quad_table_lazy_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->smult[a][b];
}
static
gf_val_32_t
gf_w4_quad_table_divide (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_quad_table_data *std;

  std = (struct gf_quad_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->div[a][b];
}
static
gf_val_32_t
gf_w4_quad_table_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  struct gf_quad_table_data *std;

  std = (struct gf_quad_table_data *) ((gf_internal_t *) (gf->scratch))->private;
  return std->mult[a][b];
}
static
void
gf_w4_quad_table_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  uint16_t *base;
  gf_region_data rd;
  gf_internal_t *h;
  struct gf_quad_table_data *std;
  struct gf_quad_table_lazy_data *ltd;
  int a, b, c, d, va, vb, vc, vd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  h = (gf_internal_t *) (gf->scratch);
  if (h->region_type & GF_REGION_LAZY) {
    ltd = (struct gf_quad_table_lazy_data *) ((gf_internal_t *) (gf->scratch))->private;
    base = ltd->mult;
    for (a = 0; a < 16; a++) {
      va = (ltd->smult[val][a] << 12);
      for (b = 0; b < 16; b++) {
        vb = (ltd->smult[val][b] << 8);
        for (c = 0; c < 16; c++) {
          vc = (ltd->smult[val][c] << 4);
          for (d = 0; d < 16; d++) {
            vd = ltd->smult[val][d];
            base[(a << 12) | (b << 8) | (c << 4) | d ] = (va | vb | vc | vd);
          }
        }
      }
    }
  } else {
    std = (struct gf_quad_table_data *) ((gf_internal_t *) (gf->scratch))->private;
    base = &(std->mult[val][0]);
  }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 8);
  gf_do_initial_region_alignment(&rd);
  gf_two_byte_region_table_multiply(&rd, base);
  gf_do_final_region_alignment(&rd);
}
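/* Note (added): gf_two_byte_region_table_multiply consumes the region 16
   bits at a time; base points at a 65536-entry table mapping four packed
   4-bit symbols to their four products.  The lazy variant rebuilds that
   table from the small 16x16 smult table on every call, trading setup
   time per call for a much smaller resident footprint. */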
static
int gf_w4_quad_table_init(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_quad_table_data *std;
  int prod, val, a, b, c, d, va, vb, vc, vd;
  uint8_t mult[GF_FIELD_SIZE][GF_FIELD_SIZE];

  h = (gf_internal_t *) gf->scratch;
  std = (struct gf_quad_table_data *)h->private;

  bzero(mult, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);
  bzero(std->div, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);

  for (a = 1; a < GF_FIELD_SIZE; a++) {
    for (b = 1; b < GF_FIELD_SIZE; b++) {
      prod = gf_w4_shift_multiply(gf, a, b);
      mult[a][b] = prod;
      std->div[prod][b] = a;
    }
  }

  for (val = 0; val < 16; val++) {
    for (a = 0; a < 16; a++) {
      va = (mult[val][a] << 12);
      for (b = 0; b < 16; b++) {
        vb = (mult[val][b] << 8);
        for (c = 0; c < 16; c++) {
          vc = (mult[val][c] << 4);
          for (d = 0; d < 16; d++) {
            vd = mult[val][d];
            std->mult[val][(a << 12) | (b << 8) | (c << 4) | d ] = (va | vb | vc | vd);
          }
        }
      }
    }
  }

  SET_FUNCTION(gf,inverse,w32,NULL)
  SET_FUNCTION(gf,divide,w32,gf_w4_quad_table_divide)
  SET_FUNCTION(gf,multiply,w32,gf_w4_quad_table_multiply)
  SET_FUNCTION(gf,multiply_region,w32,gf_w4_quad_table_multiply_region)
  return 1;
}
static
int gf_w4_quad_table_lazy_init(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_quad_table_lazy_data *std;
  int a, b, prod, loga, logb;
  uint8_t log_tbl[GF_FIELD_SIZE];
  uint8_t antilog_tbl[GF_FIELD_SIZE*2];

  h = (gf_internal_t *) gf->scratch;
  std = (struct gf_quad_table_lazy_data *)h->private;

  b = 1;
  for (a = 0; a < GF_MULT_GROUP_SIZE; a++) {
    log_tbl[b] = a;
    antilog_tbl[a] = b;
    antilog_tbl[a+GF_MULT_GROUP_SIZE] = b;
    b <<= 1;
    if (b & GF_FIELD_SIZE) {
      b = b ^ h->prim_poly;
    }
  }

  bzero(std->smult, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);
  bzero(std->div, sizeof(uint8_t) * GF_FIELD_SIZE * GF_FIELD_SIZE);

  for (a = 1; a < GF_FIELD_SIZE; a++) {
    loga = log_tbl[a];
    for (b = 1; b < GF_FIELD_SIZE; b++) {
      logb = log_tbl[b];
      prod = antilog_tbl[loga+logb];
      std->smult[a][b] = prod;
      std->div[prod][b] = a;
    }
  }

  SET_FUNCTION(gf,inverse,w32,NULL)
  SET_FUNCTION(gf,divide,w32,gf_w4_quad_table_lazy_divide)
  SET_FUNCTION(gf,multiply,w32,gf_w4_quad_table_lazy_multiply)
  SET_FUNCTION(gf,multiply_region,w32,gf_w4_quad_table_multiply_region)
  return 1;
}
static
int gf_w4_table_init(gf_t *gf)
{
  int rt;
  gf_internal_t *h;

  h = (gf_internal_t *) gf->scratch;
  rt = (h->region_type);

  if (h->mult_type == GF_MULT_DEFAULT &&
      !(gf_cpu_supports_intel_ssse3 || gf_cpu_supports_arm_neon))
    rt |= GF_REGION_DOUBLE_TABLE;

  if (rt & GF_REGION_DOUBLE_TABLE) {
    return gf_w4_double_table_init(gf);
  } else if (rt & GF_REGION_QUAD_TABLE) {
    if (rt & GF_REGION_LAZY) {
      return gf_w4_quad_table_lazy_init(gf);
    } else {
      return gf_w4_quad_table_init(gf);
    }
  } else {
    return gf_w4_single_table_init(gf);
  }
}
/* ------------------------------------------------------------
   JSP: GF_MULT_BYTWO_p and _b: See the paper.
 */
static
gf_val_32_t
gf_w4_bytwo_p_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  uint32_t prod, pp, pmask, amask;
  gf_internal_t *h;

  h = (gf_internal_t *) gf->scratch;
  pp = h->prim_poly;

  prod = 0;
  pmask = 0x8;
  amask = 0x8;

  while (amask != 0) {
    if (prod & pmask) {
      prod = ((prod << 1) ^ pp);
    } else {
      prod = (prod << 1);
    }
    if (a & amask) prod ^= b;
    amask >>= 1;
  }
  return prod;
}
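/* Note (added): bytwo_p is Horner's rule over the bits of a, high bit
   first: each step doubles the accumulated product, folding in the
   polynomial on overflow, and xors in b when the current bit of a is
   set. */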
static
gf_val_32_t
gf_w4_bytwo_b_multiply (gf_t *gf, gf_val_32_t a, gf_val_32_t b)
{
  uint32_t prod, pp, bmask;
  gf_internal_t *h;

  h = (gf_internal_t *) gf->scratch;
  pp = h->prim_poly;

  prod = 0;
  bmask = 0x8;

  while (1) {
    if (a & 1) prod ^= b;
    a >>= 1;
    if (a == 0) return prod;
    if (b & bmask) {
      b = ((b << 1) ^ pp);
    } else {
      b <<= 1;
    }
  }
}
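/* Note (added): bytwo_b walks the bits of a from the low end, doubling b
   rather than the product, and exits as soon as the remaining bits of a
   are zero, which is why it can beat bytwo_p when a is small. */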
static
void
gf_w4_bytwo_p_nosse_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  uint64_t *s64, *d64, t1, t2, ta, prod, amask;
  gf_region_data rd;
  struct gf_bytwo_data *btd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  btd = (struct gf_bytwo_data *) ((gf_internal_t *) (gf->scratch))->private;

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 8);
  gf_do_initial_region_alignment(&rd);

  s64 = (uint64_t *) rd.s_start;
  d64 = (uint64_t *) rd.d_start;

  if (xor) {
    while (s64 < (uint64_t *) rd.s_top) {
      prod = 0;
      amask = 0x8;
      ta = *s64;
      while (amask != 0) {
        AB2(btd->prim_poly, btd->mask1, btd->mask2, prod, t1, t2);
        if (val & amask) prod ^= ta;
        amask >>= 1;
      }
      *d64 ^= prod;
      d64++;
      s64++;
    }
  } else {
    while (s64 < (uint64_t *) rd.s_top) {
      prod = 0;
      amask = 0x8;
      ta = *s64;
      while (amask != 0) {
        AB2(btd->prim_poly, btd->mask1, btd->mask2, prod, t1, t2);
        if (val & amask) prod ^= ta;
        amask >>= 1;
      }
      *d64 = prod;
      d64++;
      s64++;
    }
  }
  gf_do_final_region_alignment(&rd);
}
#define BYTWO_P_ONESTEP {\
      SSE_AB2(pp, m1, prod, t1, t2); \
      t1 = _mm_and_si128(v, one); \
      t1 = _mm_sub_epi8(t1, one); \
      t1 = _mm_and_si128(t1, ta); \
      prod = _mm_xor_si128(prod, t1); \
      v = _mm_srli_epi64(v, 1); }
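/* Note (added): the subtract builds a byte mask from the low bit of v:
   (v & 1) - 1 is 0x00 in bytes where the bit is 1 and 0xff where it is 0.
   The sense looks inverted, but the caller loads v with vrev, the
   bit-reversed complement of val, so prod accumulates ta exactly where
   val has 1-bits, high bit first. */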
static
void
gf_w4_bytwo_p_sse_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  int i;
  uint8_t *s8, *d8;
  uint8_t vrev;
  __m128i pp, m1, ta, prod, t1, t2, tp, one, v;
  struct gf_bytwo_data *btd;
  gf_region_data rd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  btd = (struct gf_bytwo_data *) ((gf_internal_t *) (gf->scratch))->private;

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 16);
  gf_do_initial_region_alignment(&rd);

  vrev = 0;
  for (i = 0; i < 4; i++) {
    vrev <<= 1;
    if (!(val & (1 << i))) vrev |= 1;
  }

  s8 = (uint8_t *) rd.s_start;
  d8 = (uint8_t *) rd.d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);
  one = _mm_set1_epi8(1);

  while (d8 < (uint8_t *) rd.d_top) {
    prod = _mm_setzero_si128();
    v = _mm_set1_epi8(vrev);
    ta = _mm_load_si128((__m128i *) s8);
    tp = (!xor) ? _mm_setzero_si128() : _mm_load_si128((__m128i *) d8);
    BYTWO_P_ONESTEP;
    BYTWO_P_ONESTEP;
    BYTWO_P_ONESTEP;
    BYTWO_P_ONESTEP;
    _mm_store_si128((__m128i *) d8, _mm_xor_si128(prod, tp));
    d8 += 16;
    s8 += 16;
  }
  gf_do_final_region_alignment(&rd);
}
static
void
gf_w4_bytwo_b_sse_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  uint8_t *d8, *s8, tb;
  __m128i pp, m1, m2, t1, t2, va, vb;
  struct gf_bytwo_data *btd;
  gf_region_data rd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 16);
  gf_do_initial_region_alignment(&rd);

  s8 = (uint8_t *) rd.s_start;
  d8 = (uint8_t *) rd.d_start;

  btd = (struct gf_bytwo_data *) ((gf_internal_t *) (gf->scratch))->private;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);
  m2 = _mm_set1_epi8((btd->mask2)&0xff);

  if (xor) {
    while (d8 < (uint8_t *) rd.d_top) {
      va = _mm_load_si128 ((__m128i *)(s8));
      vb = _mm_load_si128 ((__m128i *)(d8));
      tb = val;
      while (1) {
        if (tb & 1) vb = _mm_xor_si128(vb, va);
        tb >>= 1;
        if (tb == 0) break;
        SSE_AB2(pp, m1, va, t1, t2);
      }
      _mm_store_si128((__m128i *)d8, vb);
      d8 += 16;
      s8 += 16;
    }
  } else {
    while (d8 < (uint8_t *) rd.d_top) {
      va = _mm_load_si128 ((__m128i *)(s8));
      vb = _mm_setzero_si128 ();
      tb = val;
      while (1) {
        if (tb & 1) vb = _mm_xor_si128(vb, va);
        tb >>= 1;
        if (tb == 0) break;
        t1 = _mm_and_si128(_mm_slli_epi64(va, 1), m1);
        t2 = _mm_and_si128(va, m2);
        t2 = _mm_sub_epi64 (
          _mm_slli_epi64(t2, 1), _mm_srli_epi64(t2, (GF_FIELD_WIDTH-1)));
        va = _mm_xor_si128(t1, _mm_and_si128(t2, pp));
      }
      _mm_store_si128((__m128i *)d8, vb);
      d8 += 16;
      s8 += 16;
    }
  }
  gf_do_final_region_alignment(&rd);
}
static
void
gf_w4_bytwo_b_sse_region_2_noxor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    SSE_AB2(pp, m1, va, t1, t2);
    _mm_store_si128((__m128i *)d8, va);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_2_xor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_load_si128 ((__m128i *)(d8));
    vb = _mm_xor_si128(vb, va);
    _mm_store_si128((__m128i *)d8, vb);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_4_noxor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    SSE_AB2(pp, m1, va, t1, t2);
    SSE_AB2(pp, m1, va, t1, t2);
    _mm_store_si128((__m128i *)d8, va);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_4_xor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    SSE_AB2(pp, m1, va, t1, t2);
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_load_si128 ((__m128i *)(d8));
    vb = _mm_xor_si128(vb, va);
    _mm_store_si128((__m128i *)d8, vb);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_3_noxor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    vb = va;
    SSE_AB2(pp, m1, va, t1, t2);
    va = _mm_xor_si128(va, vb);
    _mm_store_si128((__m128i *)d8, va);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_3_xor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    vb = _mm_xor_si128(_mm_load_si128 ((__m128i *)(d8)), va);
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(vb, va);
    _mm_store_si128((__m128i *)d8, vb);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_5_noxor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    vb = va;
    SSE_AB2(pp, m1, va, t1, t2);
    SSE_AB2(pp, m1, va, t1, t2);
    va = _mm_xor_si128(va, vb);
    _mm_store_si128((__m128i *)d8, va);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_5_xor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    vb = _mm_xor_si128(_mm_load_si128 ((__m128i *)(d8)), va);
    SSE_AB2(pp, m1, va, t1, t2);
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(vb, va);
    _mm_store_si128((__m128i *)d8, vb);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_7_noxor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    vb = va;
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(va, vb);
    SSE_AB2(pp, m1, va, t1, t2);
    va = _mm_xor_si128(va, vb);
    _mm_store_si128((__m128i *)d8, va);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_7_xor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    vb = _mm_xor_si128(_mm_load_si128 ((__m128i *)(d8)), va);
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(vb, va);
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(vb, va);
    _mm_store_si128((__m128i *)d8, vb);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_6_noxor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    SSE_AB2(pp, m1, va, t1, t2);
    vb = va;
    SSE_AB2(pp, m1, va, t1, t2);
    va = _mm_xor_si128(va, vb);
    _mm_store_si128((__m128i *)d8, va);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_region_6_xor(gf_region_data *rd, struct gf_bytwo_data *btd)
{
  uint8_t *d8, *s8;
  __m128i pp, m1, t1, t2, va, vb;

  s8 = (uint8_t *) rd->s_start;
  d8 = (uint8_t *) rd->d_start;

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);

  while (d8 < (uint8_t *) rd->d_top) {
    va = _mm_load_si128 ((__m128i *)(s8));
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(_mm_load_si128 ((__m128i *)(d8)), va);
    SSE_AB2(pp, m1, va, t1, t2);
    vb = _mm_xor_si128(vb, va);
    _mm_store_si128((__m128i *)d8, vb);
    d8 += 16;
    s8 += 16;
  }
}
static
void
gf_w4_bytwo_b_sse_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  uint8_t *d8, *s8, tb;
  __m128i pp, m1, m2, t1, t2, va, vb;
  struct gf_bytwo_data *btd;
  gf_region_data rd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 16);
  gf_do_initial_region_alignment(&rd);

  s8 = (uint8_t *) rd.s_start;
  d8 = (uint8_t *) rd.d_start;

  btd = (struct gf_bytwo_data *) ((gf_internal_t *) (gf->scratch))->private;

  switch (val) {
    case 2:
      if (!xor) {
        gf_w4_bytwo_b_sse_region_2_noxor(&rd, btd);
      } else {
        gf_w4_bytwo_b_sse_region_2_xor(&rd, btd);
      }
      gf_do_final_region_alignment(&rd);
      return;
    case 3:
      if (!xor) {
        gf_w4_bytwo_b_sse_region_3_noxor(&rd, btd);
      } else {
        gf_w4_bytwo_b_sse_region_3_xor(&rd, btd);
      }
      gf_do_final_region_alignment(&rd);
      return;
    case 4:
      if (!xor) {
        gf_w4_bytwo_b_sse_region_4_noxor(&rd, btd);
      } else {
        gf_w4_bytwo_b_sse_region_4_xor(&rd, btd);
      }
      gf_do_final_region_alignment(&rd);
      return;
    case 5:
      if (!xor) {
        gf_w4_bytwo_b_sse_region_5_noxor(&rd, btd);
      } else {
        gf_w4_bytwo_b_sse_region_5_xor(&rd, btd);
      }
      gf_do_final_region_alignment(&rd);
      return;
    case 6:
      if (!xor) {
        gf_w4_bytwo_b_sse_region_6_noxor(&rd, btd);
      } else {
        gf_w4_bytwo_b_sse_region_6_xor(&rd, btd);
      }
      gf_do_final_region_alignment(&rd);
      return;
    case 7:
      if (!xor) {
        gf_w4_bytwo_b_sse_region_7_noxor(&rd, btd);
      } else {
        gf_w4_bytwo_b_sse_region_7_xor(&rd, btd);
      }
      gf_do_final_region_alignment(&rd);
      return;
  }

  pp = _mm_set1_epi8(btd->prim_poly&0xff);
  m1 = _mm_set1_epi8((btd->mask1)&0xff);
  m2 = _mm_set1_epi8((btd->mask2)&0xff);

  if (xor) {
    while (d8 < (uint8_t *) rd.d_top) {
      va = _mm_load_si128 ((__m128i *)(s8));
      vb = _mm_load_si128 ((__m128i *)(d8));
      tb = val;
      while (1) {
        if (tb & 1) vb = _mm_xor_si128(vb, va);
        tb >>= 1;
        if (tb == 0) break;
        SSE_AB2(pp, m1, va, t1, t2);
      }
      _mm_store_si128((__m128i *)d8, vb);
      d8 += 16;
      s8 += 16;
    }
  } else {
    while (d8 < (uint8_t *) rd.d_top) {
      va = _mm_load_si128 ((__m128i *)(s8));
      vb = _mm_setzero_si128 ();
      tb = val;
      while (1) {
        if (tb & 1) vb = _mm_xor_si128(vb, va);
        tb >>= 1;
        if (tb == 0) break;
        t1 = _mm_and_si128(_mm_slli_epi64(va, 1), m1);
        t2 = _mm_and_si128(va, m2);
        t2 = _mm_sub_epi64 (
          _mm_slli_epi64(t2, 1), _mm_srli_epi64(t2, (GF_FIELD_WIDTH-1)));
        va = _mm_xor_si128(t1, _mm_and_si128(t2, pp));
      }
      _mm_store_si128((__m128i *)d8, vb);
      d8 += 16;
      s8 += 16;
    }
  }
  gf_do_final_region_alignment(&rd);
}
static
void
gf_w4_bytwo_b_nosse_multiply_region(gf_t *gf, void *src, void *dest, gf_val_32_t val, int bytes, int xor)
{
  uint64_t *s64, *d64, t1, t2, ta, tb, prod;
  struct gf_bytwo_data *btd;
  gf_region_data rd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 16);
  gf_do_initial_region_alignment(&rd);

  btd = (struct gf_bytwo_data *) ((gf_internal_t *) (gf->scratch))->private;
  s64 = (uint64_t *) rd.s_start;
  d64 = (uint64_t *) rd.d_start;

  while (d64 < (uint64_t *) rd.d_top) {
  while (d64 < (uint64_t *) rd.d_top) {
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    *d64 ^= (ta ^ prod);
  while (d64 < (uint64_t *) rd.d_top) {
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    if (tb & 1) prod ^= ta;
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);
  while (d64 < (uint64_t *) rd.d_top) {
    if (tb & 1) prod ^= ta;
    AB2(btd->prim_poly, btd->mask1, btd->mask2, ta, t1, t2);

  gf_do_final_region_alignment(&rd);
}
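/* Note (added): this function unrolls gf_w4_bytwo_b_multiply for each
   constant val: every case applies a fixed sequence of AB2 doublings to
   64-bit words holding sixteen stacked 4-bit values, xoring partial
   products into *d64, so the per-word bit loop disappears for vals 2
   through 15. */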
static
int gf_w4_bytwo_init(gf_t *gf)
{
  gf_internal_t *h;
  uint64_t ip, m1, m2;
  struct gf_bytwo_data *btd;

  h = (gf_internal_t *) gf->scratch;
  btd = (struct gf_bytwo_data *) (h->private);
  ip = h->prim_poly & 0xf;
  m1 = 0xe;
  m2 = 0x8;
  btd->prim_poly = 0;
  btd->mask1 = 0;
  btd->mask2 = 0;

  while (ip != 0) {
    btd->prim_poly |= ip;
    btd->mask1 |= m1;
    btd->mask2 |= m2;
    ip <<= GF_FIELD_WIDTH;
    m1 <<= GF_FIELD_WIDTH;
    m2 <<= GF_FIELD_WIDTH;
  }
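  /* Note (added): the loop replicates the 4-bit polynomial and masks into
     every nibble of a 64-bit word (for 0x13, ip = 0x3 becomes
     0x3333333333333333; mask1 = 0xe and mask2 = 0x8 become 0xee...e and
     0x88...8), so AB2 can double sixteen field elements per 64-bit
     operation. */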
  if (h->mult_type == GF_MULT_BYTWO_p) {
    SET_FUNCTION(gf,multiply,w32,gf_w4_bytwo_p_multiply)
    if (gf_cpu_supports_intel_sse2 && !(h->region_type & GF_REGION_NOSIMD)) {
      SET_FUNCTION(gf,multiply_region,w32,gf_w4_bytwo_p_sse_multiply_region)
    } else {
      SET_FUNCTION(gf,multiply_region,w32,gf_w4_bytwo_p_nosse_multiply_region)
      if (h->region_type & GF_REGION_SIMD)
        return 0;
    }
  } else {
    SET_FUNCTION(gf,multiply,w32,gf_w4_bytwo_b_multiply)
    if (gf_cpu_supports_intel_sse2 && !(h->region_type & GF_REGION_NOSIMD)) {
      SET_FUNCTION(gf,multiply_region,w32,gf_w4_bytwo_b_sse_multiply_region)
    } else {
      SET_FUNCTION(gf,multiply_region,w32,gf_w4_bytwo_b_nosse_multiply_region)
      if (h->region_type & GF_REGION_SIMD)
        return 0;
    }
  }
  return 1;
}
static
int gf_w4_cfm_init(gf_t *gf)
{
#if defined(INTEL_SSE4_PCLMUL)
  if (gf_cpu_supports_intel_pclmul) {
    SET_FUNCTION(gf,multiply,w32,gf_w4_clm_multiply)
    return 1;
  }
#elif defined(ARM_NEON)
  if (gf_cpu_supports_arm_neon) {
    return gf_w4_neon_cfm_init(gf);
  }
#endif
  return 0;
}
static
int gf_w4_shift_init(gf_t *gf)
{
  SET_FUNCTION(gf,multiply,w32,gf_w4_shift_multiply)
  return 1;
}
/* JSP: I'm putting all error-checking into gf_error_check(), so you don't
   have to do error checking in scratch_size or in init. */

int gf_w4_scratch_size(int mult_type, int region_type, int divide_type, int arg1, int arg2)
{
  switch(mult_type)
  {
    case GF_MULT_BYTWO_p:
    case GF_MULT_BYTWO_b:
      return sizeof(gf_internal_t) + sizeof(struct gf_bytwo_data);
      break;
    case GF_MULT_DEFAULT:
    case GF_MULT_TABLE:
      if (region_type == GF_REGION_CAUCHY) {
        return sizeof(gf_internal_t) + sizeof(struct gf_single_table_data) + 64;
      }

      if (mult_type == GF_MULT_DEFAULT &&
          !(gf_cpu_supports_arm_neon || gf_cpu_supports_intel_ssse3))
        region_type = GF_REGION_DOUBLE_TABLE;

      if (region_type & GF_REGION_DOUBLE_TABLE) {
        return sizeof(gf_internal_t) + sizeof(struct gf_double_table_data) + 64;
      } else if (region_type & GF_REGION_QUAD_TABLE) {
        if ((region_type & GF_REGION_LAZY) == 0) {
          return sizeof(gf_internal_t) + sizeof(struct gf_quad_table_data) + 64;
        } else {
          return sizeof(gf_internal_t) + sizeof(struct gf_quad_table_lazy_data) + 64;
        }
      } else {
        return sizeof(gf_internal_t) + sizeof(struct gf_single_table_data) + 64;
      }
      break;

    case GF_MULT_LOG_TABLE:
      return sizeof(gf_internal_t) + sizeof(struct gf_logtable_data) + 64;
      break;
    case GF_MULT_CARRY_FREE:
      return sizeof(gf_internal_t);
      break;
    case GF_MULT_SHIFT:
      return sizeof(gf_internal_t);
      break;
    default:
      return 0;
  }
}
int gf_w4_init (gf_t *gf)
{
  gf_internal_t *h;

  h = (gf_internal_t *) gf->scratch;
  if (h->prim_poly == 0) h->prim_poly = 0x13;
  h->prim_poly |= 0x10;
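  /* Note (added): 0x13 is x^4 + x + 1, the default primitive polynomial
     for GF(16); or-ing in 0x10 guarantees the x^4 term is present even in
     a user-supplied polynomial. */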
  SET_FUNCTION(gf,multiply,w32,NULL)
  SET_FUNCTION(gf,divide,w32,NULL)
  SET_FUNCTION(gf,inverse,w32,NULL)
  SET_FUNCTION(gf,multiply_region,w32,NULL)
  SET_FUNCTION(gf,extract_word,w32,gf_w4_extract_word)

  switch(h->mult_type) {
    case GF_MULT_CARRY_FREE: if (gf_w4_cfm_init(gf) == 0) return 0; break;
    case GF_MULT_SHIFT:      if (gf_w4_shift_init(gf) == 0) return 0; break;
    case GF_MULT_BYTWO_p:
    case GF_MULT_BYTWO_b:    if (gf_w4_bytwo_init(gf) == 0) return 0; break;
    case GF_MULT_LOG_TABLE:  if (gf_w4_log_init(gf) == 0) return 0; break;
    case GF_MULT_DEFAULT:
    case GF_MULT_TABLE:      if (gf_w4_table_init(gf) == 0) return 0; break;
    default: return 0;
  }

  if (h->divide_type == GF_DIVIDE_EUCLID) {
    SET_FUNCTION(gf,divide,w32,gf_w4_divide_from_inverse)
    SET_FUNCTION(gf,inverse,w32,gf_w4_euclid)
  } else if (h->divide_type == GF_DIVIDE_MATRIX) {
    SET_FUNCTION(gf,divide,w32,gf_w4_divide_from_inverse)
    SET_FUNCTION(gf,inverse,w32,gf_w4_matrix)
  }

  if (gf->divide.w32 == NULL) {
    SET_FUNCTION(gf,divide,w32,gf_w4_divide_from_inverse)
    if (gf->inverse.w32 == NULL) SET_FUNCTION(gf,inverse,w32,gf_w4_euclid)
  }

  if (gf->inverse.w32 == NULL) SET_FUNCTION(gf,inverse,w32,gf_w4_inverse_from_divide)

  if (h->region_type == GF_REGION_CAUCHY) {
    SET_FUNCTION(gf,multiply_region,w32,gf_wgen_cauchy_region)
    SET_FUNCTION(gf,extract_word,w32,gf_wgen_extract_word)
  }

  if (gf->multiply_region.w32 == NULL) {
    SET_FUNCTION(gf,multiply_region,w32,gf_w4_multiply_region_from_single)
  }

  return 1;
}
/* Inline setup functions */

uint8_t *gf_w4_get_mult_table(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_single_table_data *std;

  h = (gf_internal_t *) gf->scratch;
  if (gf->multiply.w32 == gf_w4_single_table_multiply) {
    std = (struct gf_single_table_data *) h->private;
    return (uint8_t *) std->mult;
  }
  return NULL;
}

uint8_t *gf_w4_get_div_table(gf_t *gf)
{
  gf_internal_t *h;
  struct gf_single_table_data *std;

  h = (gf_internal_t *) gf->scratch;
  if (gf->multiply.w32 == gf_w4_single_table_multiply) {
    std = (struct gf_single_table_data *) h->private;
    return (uint8_t *) std->div;
  }
  return NULL;
}