/* gf128mul.c - GF(2^128) multiplication functions */
/*
 * These tables give, for each possible overflow byte, the 16-bit value that
 * must be XOR-ed into the low-degree end of the product to reduce it modulo
 * the field polynomial.  There is one table for the "be" convention, where
 * the highest-order bit is the coefficient of the highest-degree polynomial
 * term, and one for the "le" convention, where the highest-order bit is the
 * coefficient of the lowest-degree polynomial term.
 */
static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
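
/*
 * Illustrative sketch (not part of this file): the same reduction tables can
 * be built at run time instead of with the gf128mul_dat()/xda_*() macros.
 * The names build_gf128mul_tables, table_be and table_le are made up for the
 * example; it assumes the field polynomial x^128 + x^7 + x^2 + x + 1, whose
 * low terms are 0x87 in the "be" bit order and 0xe1 bit-reversed for "le".
 */
#include <stdint.h>

static uint16_t table_be[256], table_le[256];

static void build_gf128mul_tables(void)
{
        int i, k;

        for (i = 0; i < 256; i++) {
                uint16_t be = 0, le = 0;

                for (k = 0; k < 8; k++) {
                        if (i & (1 << k))        /* overflow bit x^(128 + k) */
                                be ^= (uint16_t)0x87 << k;
                        if (i & (0x80 >> k))     /* same bit, reflected order */
                                le ^= (uint16_t)0xe100 >> k;
                }
                table_be[i] = be;
                table_le[i] = le;
        }
}
/* Spot check: table_be[0x01] == 0x0087 (x^7 + x^2 + x + 1), table_le[0x80] == 0xe100. */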

/*
 * The following functions multiply a field element by x^8 in the polynomial
 * field representation.  They use 64-bit word operations to gain speed but
 * compensate for machine endianness so they work on both styles of machine.
 */

/* in gf128mul_x8_lle(): */
        u64 a = be64_to_cpu(x->a);
        u64 b = be64_to_cpu(x->b);
        u64 _tt = gf128mul_table_le[b & 0xff];

        x->b = cpu_to_be64((b >> 8) | (a << 56));
        x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
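
/*
 * Reading note on the lle variant above: with the "le" convention described
 * earlier, the low-degree end of the element sits in the most significant
 * bits of x->a, so multiplying by x^8 is a right shift of the 128-bit value.
 * The byte shifted out of the bottom of b indexes gf128mul_table_le, and the
 * 16-bit reduction value is folded back into the top 16 bits of a, hence
 * the "_tt << 48".
 */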

/* in gf128mul_x8_lle_ti(): */
        u64 a = be64_to_cpu(x->a);
        u64 b = be64_to_cpu(x->b);

        x->b = cpu_to_be64((b >> 8) | (a << 56));
        x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));

/* in gf128mul_x8_bbe(): */
        u64 a = be64_to_cpu(x->a);
        u64 b = be64_to_cpu(x->b);
        u64 _tt = gf128mul_table_be[a >> 56];

        x->a = cpu_to_be64((a << 8) | (b >> 56));
        x->b = cpu_to_be64((b << 8) ^ _tt);

/* in gf128mul_x8_ble(): */
        u64 a = le64_to_cpu(x->a);
        u64 b = le64_to_cpu(x->b);
        u64 _tt = gf128mul_table_be[a >> 56];

        r->a = cpu_to_le64((a << 8) | (b >> 56));
        r->b = cpu_to_le64((b << 8) ^ _tt);
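
/*
 * Cross-check sketch (not kernel code): a byte-oriented version of the bbe
 * shift-and-reduce above, using the table_be[] array built in the earlier
 * sketch (the kernel's gf128mul_table_be holds the same values).  The name
 * x8_bbe_bytewise() is made up; v[0] is the most significant byte of the
 * field element, as in the be128 layout.
 */
#include <stdint.h>
#include <string.h>

static void x8_bbe_bytewise(uint8_t v[16])
{
        uint16_t tt = table_be[v[0]];   /* byte that overflows past x^127 */

        memmove(v, v + 1, 15);          /* multiply by x^8: shift up one byte */
        v[15] = 0;
        v[14] ^= tt >> 8;               /* fold reduction into low-degree end */
        v[15] ^= tt & 0xff;
}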

/* in gf128mul_lle(): */
        /*
         * The p array should be aligned to twice the size of its element type,
         * so that each even/odd pair of entries is guaranteed to share a
         * cacheline (for cacheline sizes of 32 bytes or more).
         */
        be128 *p = PTR_ALIGN(&array[0], 2 * sizeof(be128));

        p[0] = *r;
        for (i = 0; i < 7; ++i)
                gf128mul_x_lle(&p[2 * i + 2], &p[2 * i]);

                u8 ch = ((u8 *)b)[15 - i];

                be128_xor(r, r, &p[ 0 + !(ch & 0x80)]);
                be128_xor(r, r, &p[ 2 + !(ch & 0x40)]);
                be128_xor(r, r, &p[ 4 + !(ch & 0x20)]);
                be128_xor(r, r, &p[ 6 + !(ch & 0x10)]);
                be128_xor(r, r, &p[ 8 + !(ch & 0x08)]);
                be128_xor(r, r, &p[10 + !(ch & 0x04)]);
                be128_xor(r, r, &p[12 + !(ch & 0x02)]);
                be128_xor(r, r, &p[14 + !(ch & 0x01)]);
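
/*
 * What the selection lines above compute: p[2*i] holds the r operand
 * multiplied by x^i, and the odd entries p[2*i + 1] stay zero (the array is
 * zero-initialized), so indexing with !(ch & bit) adds either that multiple
 * or nothing.  A naive equivalent of the first line would be
 *
 *      if (ch & 0x80)
 *              be128_xor(r, r, &p[0]);
 *
 * but the branch (or an index that may land in a different cacheline) could
 * leak key-dependent timing; the aligned even/odd pairing keeps the memory
 * access pattern the same whichever entry is picked.
 */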

/*  This version uses 64 kbytes of tables: for each of the 16 byte positions
    of the multiplicand we construct a table of the 256 16 byte values that
    result from the 256 values of that byte.  */

/* in gf128mul_init_64k_bbe(): */
                t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
                if (!t->t[i]) {

        t->t[0]->t[1] = *g;
                gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);

                for (j = 2; j < 256; j += j)
                                be128_xor(&t->t[i]->t[j + k],
                                          &t->t[i]->t[j], &t->t[i]->t[k]);

                        t->t[i]->t[j] = t->t[i - 1]->t[j];
                        gf128mul_x8_bbe(&t->t[i]->t[j]);

/* in gf128mul_free_64k(): */
        for (i = 0; i < 16; i++)
                kfree_sensitive(t->t[i]);

/* in gf128mul_64k_bbe(): */
        *r = t->t[0]->t[ap[15]];
        for (i = 1; i < 16; ++i)
                be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
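
/*
 * Hedged usage sketch (not from this file): how a caller might drive the 64k
 * bbe interface, assuming the declarations in <crypto/gf128mul.h>.  The
 * helper name mul_by_h_64k() and the error handling are made up for the
 * example; buf is multiplied in place by the fixed element h.
 */
#include <linux/errno.h>
#include <crypto/gf128mul.h>

static int mul_by_h_64k(be128 *buf, const be128 *h)
{
        struct gf128mul_64k *t;

        t = gf128mul_init_64k_bbe(h);   /* precompute 16 tables of 256 entries */
        if (!t)
                return -ENOMEM;
        gf128mul_64k_bbe(buf, t);       /* buf = buf * h in GF(2^128), bbe */
        gf128mul_free_64k(t);           /* zeroizes and frees the tables */
        return 0;
}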

/*  This version uses 4 kbytes of tables: if we consider the multiplicand a
    single byte at a time, we can construct a table of the 256 16 byte
    values that result from the 256 values of this byte; the higher byte
    positions are then handled by repeated multiplication by x^8.  */
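
/*
 * The reasoning shared by the 64k and 4k variants, spelled out: writing the
 * multiplicand as b = b_0 + b_1*x^8 + ... + b_15*x^120, with each b_i one
 * byte, linearity over GF(2) gives b*g = sum over i of (b_i*g)*x^(8*i).  The
 * 64k variant stores a 256-entry table of (j*g)*x^(8*i) for every byte
 * position i, so a product is 16 lookups and 15 XORs; the 4k variant stores
 * only the single-byte table j*g and recreates the x^(8*i) factors by
 * multiplying the accumulator by x^8 between lookups (Horner's rule).
 */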

/* in gf128mul_init_4k_lle(): */
        t->t[128] = *g;
        for (j = 64; j > 0; j >>= 1)
                gf128mul_x_lle(&t->t[j], &t->t[j+j]);

        for (j = 2; j < 256; j += j)
                for (k = 1; k < j; ++k)
                        be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);

/* in gf128mul_4k_lle(): */
        *r = t->t[ap[15]];
        while (i--) {
                gf128mul_x8_lle(r);
                be128_xor(r, r, &t->t[ap[i]]);
        }
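
/*
 * Hedged usage sketch mirroring the 64k example above, again assuming the
 * declarations in <crypto/gf128mul.h>; mul_by_h_4k() is a made-up caller.
 */
#include <linux/errno.h>
#include <crypto/gf128mul.h>

static int mul_by_h_4k(be128 *buf, const be128 *h)
{
        struct gf128mul_4k *t;

        t = gf128mul_init_4k_lle(h);    /* one 256-entry table: 4 kbytes */
        if (!t)
                return -ENOMEM;
        gf128mul_4k_lle(buf, t);        /* buf = buf * h, lle convention */
        gf128mul_free_4k(t);            /* assumed helper from the header */
        return 0;
}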