/*
 * GF-Complete: A Comprehensive Open Source Library for Galois Field Arithmetic
 * James S. Plank, Ethan L. Miller, Kevin M. Greenan,
 * Benjamin A. Arnold, John A. Burnum, Adam W. Disney, Allen C. McBride.
 *
 * Copyright (c) 2014: Janne Grunau <j@jannau.net>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 *  - Neither the name of the University of Tennessee nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * gf_w32_neon.c
 *
 * Neon routines for 32-bit Galois fields
 */

#include "gf_int.h"   /* gf-complete internals: gf_internal_t, gf_region_data */
#include "gf_w32.h"   /* w=32 declarations, gf_val_32_t */
#include <arm_neon.h> /* NEON intrinsics */
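
/* AArch64 provides vqtbl1q_u8 natively; on 32-bit ARM, emulate the 16-byte
 * table lookup by running vtbl2_u8 over the low and high halves of the
 * index vector and recombining the results. */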
#ifndef ARCH_AARCH64
#define vqtbl1q_u8(tbl, v) vcombine_u8(vtbl2_u8(tbl, vget_low_u8(v)),   \
                                       vtbl2_u8(tbl, vget_high_u8(v)))
#endif
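
/*
 * Core kernel: multiplies a 64-byte-aligned region by val in GF(2^32)
 * using the "split" table scheme.  Each 32-bit word is viewed as eight
 * 4-bit digits, so word * val is the XOR of eight lookups; btable[i][j]
 * holds byte j of (d << (4*i)) * val for every digit d in 0..15, and
 * vqtbl1q_u8 evaluates 16 such lookups at once.
 */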
static
void
neon_w32_split_4_32_multiply_region(gf_t *gf, uint32_t *src, uint32_t *dst,
                                    uint32_t *d_end, uint8_t btable[8][4][16],
                                    uint32_t val, int xor, int altmap)
{
  int i, j;
#ifdef ARCH_AARCH64
  uint8x16_t tables[8][4];
#else
  uint8x8x2_t tables[8][4];
#endif
  uint32x4_t v0, v1, v2, v3, s0, s1, s2, s3;
  uint8x16_t p0, p1, p2, p3, si, mask1;
  uint16x8x2_t r0, r1;
  uint8x16x2_t q0, q1;

  for (i = 0; i < 8; i++) {
    for (j = 0; j < 4; j++) {
#ifdef ARCH_AARCH64
      tables[i][j] = vld1q_u8(btable[i][j]);
#else
      tables[i][j].val[0] = vld1_u8(btable[i][j]);
      tables[i][j].val[1] = vld1_u8(btable[i][j] + 8);
#endif
    }
  }

  mask1 = vdupq_n_u8(0xf);

  while (dst < d_end) {
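      /* Each iteration consumes 16 words (64 bytes); gf_set_region_data()
       * is called by the wrapper with a 64-byte alignment, so the region
       * between d_start and d_top is a whole number of blocks. */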
      v0 = vld1q_u32(src); src += 4;
      v1 = vld1q_u32(src); src += 4;
      v2 = vld1q_u32(src); src += 4;
      v3 = vld1q_u32(src); src += 4;
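
      /* altmap keeps bytes in the transposed layout they were stored in,
       * so they are used directly.  The standard layout needs a 16-bit and
       * then an 8-bit transpose, after which q0.val[0] holds byte 0 (the
       * least significant) of all 16 words, q0.val[1] byte 1, q1.val[0]
       * byte 2, and q1.val[1] byte 3. */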
      if (altmap) {
          q0.val[0] = vreinterpretq_u8_u32(v0);
          q0.val[1] = vreinterpretq_u8_u32(v1);
          q1.val[0] = vreinterpretq_u8_u32(v2);
          q1.val[1] = vreinterpretq_u8_u32(v3);
      } else {
          r0 = vtrnq_u16(vreinterpretq_u16_u32(v0), vreinterpretq_u16_u32(v2));
          r1 = vtrnq_u16(vreinterpretq_u16_u32(v1), vreinterpretq_u16_u32(v3));

          q0 = vtrnq_u8(vreinterpretq_u8_u16(r0.val[0]),
                        vreinterpretq_u8_u16(r1.val[0]));
          q1 = vtrnq_u8(vreinterpretq_u8_u16(r0.val[1]),
                        vreinterpretq_u8_u16(r1.val[1]));
      }
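
      /* Eight lookup rounds, one per 4-bit digit: byte b of each word
       * supplies digits 2b (low nibble) and 2b+1 (high nibble), indexing
       * tables[2b] and tables[2b+1].  The planes p0..p3 accumulate the
       * four bytes of the 32-bit partial products via XOR. */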
      si = vandq_u8(q0.val[0], mask1);
      p0 = vqtbl1q_u8(tables[0][0], si);
      p1 = vqtbl1q_u8(tables[0][1], si);
      p2 = vqtbl1q_u8(tables[0][2], si);
      p3 = vqtbl1q_u8(tables[0][3], si);

      si = vshrq_n_u8(q0.val[0], 4);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[1][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[1][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[1][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[1][3], si));

      si = vandq_u8(q0.val[1], mask1);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[2][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[2][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[2][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[2][3], si));

      si = vshrq_n_u8(q0.val[1], 4);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[3][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[3][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[3][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[3][3], si));

      si = vandq_u8(q1.val[0], mask1);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[4][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[4][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[4][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[4][3], si));

      si = vshrq_n_u8(q1.val[0], 4);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[5][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[5][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[5][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[5][3], si));

      si = vandq_u8(q1.val[1], mask1);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[6][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[6][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[6][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[6][3], si));

      si = vshrq_n_u8(q1.val[1], 4);
      p0 = veorq_u8(p0, vqtbl1q_u8(tables[7][0], si));
      p1 = veorq_u8(p1, vqtbl1q_u8(tables[7][1], si));
      p2 = veorq_u8(p2, vqtbl1q_u8(tables[7][2], si));
      p3 = veorq_u8(p3, vqtbl1q_u8(tables[7][3], si));
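
      /* Reassemble 32-bit results: with altmap the four byte planes are
       * stored as-is; otherwise the earlier transpose is inverted so dst
       * keeps the standard word layout. */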
      if (altmap) {
          s0 = vreinterpretq_u32_u8(p0);
          s1 = vreinterpretq_u32_u8(p1);
          s2 = vreinterpretq_u32_u8(p2);
          s3 = vreinterpretq_u32_u8(p3);
      } else {
          q0 = vtrnq_u8(p0, p1);
          q1 = vtrnq_u8(p2, p3);

          r0 = vtrnq_u16(vreinterpretq_u16_u8(q0.val[0]),
                         vreinterpretq_u16_u8(q1.val[0]));
          r1 = vtrnq_u16(vreinterpretq_u16_u8(q0.val[1]),
                         vreinterpretq_u16_u8(q1.val[1]));

          s0 = vreinterpretq_u32_u16(r0.val[0]);
          s1 = vreinterpretq_u32_u16(r1.val[0]);
          s2 = vreinterpretq_u32_u16(r0.val[1]);
          s3 = vreinterpretq_u32_u16(r1.val[1]);
      }
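
      /* xor mode: fold the products into the existing dst contents
       * (dst ^= src * val) instead of overwriting (dst = src * val). */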
      if (xor) {
          v0 = vld1q_u32(dst);
          v1 = vld1q_u32(dst + 4);
          v2 = vld1q_u32(dst + 8);
          v3 = vld1q_u32(dst + 12);
          s0 = veorq_u32(s0, v0);
          s1 = veorq_u32(s1, v1);
          s2 = veorq_u32(s2, v2);
          s3 = veorq_u32(s3, v3);
      }

      vst1q_u32(dst,      s0);
      vst1q_u32(dst + 4,  s1);
      vst1q_u32(dst + 8,  s2);
      vst1q_u32(dst + 12, s3);

      dst += 16;
  }
}
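
/*
 * Lazy wrapper: builds the 8 x 4 x 16 byte tables for this particular val
 * on every call (rather than precomputing tables for all values of val),
 * aligns the region, and hands it to the kernel above.
 */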
static
inline
void
neon_w32_split_4_32_lazy_multiply_region(gf_t *gf, void *src, void *dest, uint32_t val, int bytes, int xor, int altmap)
{
  gf_internal_t *h;
  int i, j, k;
  uint32_t pp, v, *s32, *d32, *top, tmp_table[16];
  uint8_t btable[8][4][16];
  gf_region_data rd;

  if (val == 0) { gf_multby_zero(dest, bytes, xor); return; }
  if (val == 1) { gf_multby_one(src, dest, bytes, xor); return; }

  h = (gf_internal_t *) gf->scratch;
  pp = h->prim_poly;

  gf_set_region_data(&rd, gf, src, dest, bytes, val, xor, 64);
  gf_do_initial_region_alignment(&rd);

  s32 = (uint32_t *) rd.s_start;
  d32 = (uint32_t *) rd.d_start;
  top = (uint32_t *) rd.d_top;
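
  /* Build the digit tables by repeated GF doubling: in pass i,
   * tmp_table[k] == (k << (4*i)) * val in GF(2^32), doubling v and
   * reducing by the primitive polynomial pp whenever the top bit is set.
   * Each 32-bit table is then sliced into four byte planes for vtbl. */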
  v = val;
  for (i = 0; i < 8; i++) {
      tmp_table[0] = 0;
      for (j = 1; j < 16; j <<= 1) {
          for (k = 0; k < j; k++) {
              tmp_table[k^j] = (v ^ tmp_table[k]);
          }
          v = (v & GF_FIRST_BIT) ? ((v << 1) ^ pp) : (v << 1);
      }
      for (j = 0; j < 4; j++) {
          for (k = 0; k < 16; k++) {
              btable[i][j][k] = (uint8_t) tmp_table[k];
              tmp_table[k] >>= 8;
          }
      }
  }

  if (xor)
      neon_w32_split_4_32_multiply_region(gf, s32, d32, top, btable, val, 1, altmap);
  else
      neon_w32_split_4_32_multiply_region(gf, s32, d32, top, btable, val, 0, altmap);

  gf_do_final_region_alignment(&rd);
}

static
void
gf_w32_split_4_32_lazy_multiply_region_neon(gf_t *gf, void *src, void *dest,
                                            gf_val_32_t val, int bytes, int xor)
{
  neon_w32_split_4_32_lazy_multiply_region(gf, src, dest, val, bytes, xor, 0);
}

static
void
gf_w32_split_4_32_lazy_altmap_multiply_region_neon(gf_t *gf, void *src,
                                                   void *dest, gf_val_32_t val,
                                                   int bytes, int xor)
{
  neon_w32_split_4_32_lazy_multiply_region(gf, src, dest, val, bytes, xor, 1);
}

void gf_w32_neon_split_init(gf_t *gf)
{
  gf_internal_t *h = (gf_internal_t *) gf->scratch;

  if (h->region_type & GF_REGION_ALTMAP)
    SET_FUNCTION(gf,multiply_region,w32,gf_w32_split_4_32_lazy_altmap_multiply_region_neon)
  else
    SET_FUNCTION(gf,multiply_region,w32,gf_w32_split_4_32_lazy_multiply_region_neon)
}
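
/*
 * Usage sketch (illustrative, not part of this file): when gf-complete is
 * built with NEON support, this init is reached through the w=32 split
 * initialization, and callers only see the dispatched region operation.
 * gf_init_easy() and the multiply_region.w32 member are the public
 * gf-complete API; the buffer names below are hypothetical.
 *
 *   gf_t gf;
 *   gf_init_easy(&gf, 32);                                 // GF(2^32)
 *   gf.multiply_region.w32(&gf, src, dst, val, bytes, 1);  // dst ^= src * val
 */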