// This implementation of poly1305 is by Andrew Moon
// (https://github.com/floodyberry/poly1305-donna) and released as public
// domain. It implements SIMD vectorization based on the algorithm described
// in http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e.
// 64-byte block size.

#include <openssl/poly1305.h>

#include "../internal.h"

#if defined(BORINGSSL_HAS_UINT128) && defined(OPENSSL_X86_64)

#include <emmintrin.h>

typedef __m128i xmmi;
static const alignas(16) uint32_t poly1305_x64_sse2_message_mask[4] = {
    (1 << 26) - 1, 0, (1 << 26) - 1, 0};
static const alignas(16) uint32_t poly1305_x64_sse2_5[4] = {5, 0, 5, 0};
static const alignas(16) uint32_t poly1305_x64_sse2_1shl128[4] = {
    (1 << 24), 0, (1 << 24), 0};
static inline uint128_t add128(uint128_t a, uint128_t b) { return a + b; }

static inline uint128_t add128_64(uint128_t a, uint64_t b) { return a + b; }

static inline uint128_t mul64x64_128(uint64_t a, uint64_t b) {
  return (uint128_t)a * b;
}

static inline uint64_t lo128(uint128_t a) { return (uint64_t)a; }

static inline uint64_t shr128(uint128_t v, const int shift) {
  return (uint64_t)(v >> shift);
}

static inline uint64_t shr128_pair(uint64_t hi, uint64_t lo, const int shift) {
  return (uint64_t)((((uint128_t)hi << 64) | lo) >> shift);
}
typedef struct poly1305_power_t {
  union {
    xmmi v;
    uint64_t u[2];
    uint32_t d[4];
  } R20, R21, R22, R23, R24, S21, S22, S23, S24;
} poly1305_power;

typedef struct poly1305_state_internal_t {
  poly1305_power P[2];  // 288 bytes, top 32-bit halves unused
  union {
    xmmi H[5];
    uint64_t HH[10];
  };
  uint64_t started;
  uint64_t leftover;
  uint8_t buffer[64];
} poly1305_state_internal;
static_assert(sizeof(struct poly1305_state_internal_t) + 63 <=
                  sizeof(poly1305_state),
              "poly1305_state isn't large enough to hold aligned "
              "poly1305_state_internal_t");
static inline poly1305_state_internal *poly1305_aligned_state(
    poly1305_state *state) {
  return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63);
}

static inline size_t poly1305_min(size_t a, size_t b) {
  return (a < b) ? a : b;
}
void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) {
  poly1305_state_internal *st = poly1305_aligned_state(state);
  poly1305_power *p;
  uint64_t r0, r1, r2;
  uint64_t t0, t1;

  // clamp key: r &= 0xffffffc0ffffffc0ffffffc0fffffff, split into
  // 44/44/42-bit limbs
  t0 = load_u64_le(key + 0);
  t1 = load_u64_le(key + 8);
  r0 = t0 & 0xffc0fffffff;
  t0 >>= 44;
  t0 |= t1 << 20;
  r1 = t0 & 0xfffffc0ffff;
  t1 >>= 24;
  r2 = t1 & 0x00ffffffc0f;
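  // Store r (three 44-bit limbs) and the pad s = key[16..31] in the unused
  // upper 32-bit halves of st->P[1]; poly1305_first_block and the scalar
  // tail read them back from there.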
  p = &st->P[1];
  p->R20.d[1] = (uint32_t)(r0);
  p->R20.d[3] = (uint32_t)(r0 >> 32);
  p->R21.d[1] = (uint32_t)(r1);
  p->R21.d[3] = (uint32_t)(r1 >> 32);
  p->R22.d[1] = (uint32_t)(r2);
  p->R22.d[3] = (uint32_t)(r2 >> 32);
  p->R23.d[1] = load_u32_le(key + 16);
  p->R23.d[3] = load_u32_le(key + 20);
  p->R24.d[1] = load_u32_le(key + 24);
  p->R24.d[3] = load_u32_le(key + 28);
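  // H = 0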
  st->H[0] = _mm_setzero_si128();
  st->H[1] = _mm_setzero_si128();
  st->H[2] = _mm_setzero_si128();
  st->H[3] = _mm_setzero_si128();
  st->H[4] = _mm_setzero_si128();

  st->started = 0;
  st->leftover = 0;
}
static void poly1305_first_block(poly1305_state_internal *st,
                                 const uint8_t *m) {
  const xmmi MMASK =
      _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
  xmmi T5, T6;
  poly1305_power *p;
  uint128_t d[3];
  uint64_t r20, r21, r22, s22, c;
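  // Compute [r^2, r^2] and [r^4, r^4]. Each SIMD lane carries an independent
  // accumulator, so the main loop multiplies by r^4 per 64-byte step and the
  // interleaved blocks by r^2; both powers come from squaring r
  // (44/44/42-bit limbs) and reducing mod 2^130 - 5 after each squaring.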
  s22 = r22 * (5 << 2);

  d[0] = add128(mul64x64_128(r20, r20), mul64x64_128(r21 * 2, s22));
  d[1] = add128(mul64x64_128(r22, s22), mul64x64_128(r20 * 2, r21));
  d[2] = add128(mul64x64_128(r21, r21), mul64x64_128(r22 * 2, r20));

  r20 = lo128(d[0]) & 0xfffffffffff;
  c = shr128(d[0], 44);
  d[1] = add128_64(d[1], c);
  r21 = lo128(d[1]) & 0xfffffffffff;
  c = shr128(d[1], 44);
  d[2] = add128_64(d[2], c);
  r22 = lo128(d[2]) & 0x3ffffffffff;
  c = shr128(d[2], 42);
  r20 += c * 5;
  c = r20 >> 44;
  r20 = r20 & 0xfffffffffff;
  r21 += c;
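  // Splat the five 26-bit limbs of this power across both 32-bit lanes; the
  // S2x values pre-multiply by 5 so the wrap at 2^130 (= 5 mod p) is free
  // during the multiply.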
  p->R20.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)(r20)&0x3ffffff),
                               _MM_SHUFFLE(1, 0, 1, 0));
  p->R21.v = _mm_shuffle_epi32(
      _mm_cvtsi32_si128((uint32_t)((r20 >> 26) | (r21 << 18)) & 0x3ffffff),
      _MM_SHUFFLE(1, 0, 1, 0));
  p->R22.v =
      _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r21 >> 8)) & 0x3ffffff),
                        _MM_SHUFFLE(1, 0, 1, 0));
  p->R23.v = _mm_shuffle_epi32(
      _mm_cvtsi32_si128((uint32_t)((r21 >> 34) | (r22 << 10)) & 0x3ffffff),
      _MM_SHUFFLE(1, 0, 1, 0));
  p->R24.v = _mm_shuffle_epi32(_mm_cvtsi32_si128((uint32_t)((r22 >> 16))),
                               _MM_SHUFFLE(1, 0, 1, 0));
  p->S21.v = _mm_mul_epu32(p->R21.v, FIVE);
  p->S22.v = _mm_mul_epu32(p->R22.v, FIVE);
  p->S23.v = _mm_mul_epu32(p->R23.v, FIVE);
  p->S24.v = _mm_mul_epu32(p->R24.v, FIVE);
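  // H = [Mx, My]: interleave the first two 16-byte blocks, one per lane,
  // split each into five 26-bit limbs, and set the 2^128 padding bit via
  // HIBIT.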
  T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
                          _mm_loadl_epi64((const xmmi *)(m + 16)));
  T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
                          _mm_loadl_epi64((const xmmi *)(m + 24)));
  st->H[0] = _mm_and_si128(MMASK, T5);
  st->H[1] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
  T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
  st->H[2] = _mm_and_si128(MMASK, T5);
  st->H[3] = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
  st->H[4] = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
}
static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m,
                            size_t bytes) {
  const xmmi MMASK =
      _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
  poly1305_power *p;
  xmmi H0, H1, H2, H3, H4;
  xmmi T0, T1, T2, T3, T4, T5, T6;
  xmmi M0, M1, M2, M3, M4;
  xmmi C1, C2;

  H0 = st->H[0];
  H1 = st->H[1];
  H2 = st->H[2];
  H3 = st->H[3];
  H4 = st->H[4];
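  // Each 64-byte iteration computes H = H*[r^4,r^4] + m[0..31]*[r^2,r^2] +
  // m[32..63], keeping one block stream per 64-bit lane.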
  while (bytes >= 64) {
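    // H *= [r^4, r^4]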
    p = &st->P[0];
    T0 = _mm_mul_epu32(H0, p->R20.v);
    T1 = _mm_mul_epu32(H0, p->R21.v);
    T2 = _mm_mul_epu32(H0, p->R22.v);
    T3 = _mm_mul_epu32(H0, p->R23.v);
    T4 = _mm_mul_epu32(H0, p->R24.v);
    T5 = _mm_mul_epu32(H1, p->S24.v);
    T6 = _mm_mul_epu32(H1, p->R20.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H2, p->S23.v);
    T6 = _mm_mul_epu32(H2, p->S24.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H3, p->S22.v);
    T6 = _mm_mul_epu32(H3, p->S23.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H4, p->S21.v);
    T6 = _mm_mul_epu32(H4, p->S22.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H1, p->R21.v);
    T6 = _mm_mul_epu32(H1, p->R22.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H2, p->R20.v);
    T6 = _mm_mul_epu32(H2, p->R21.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H3, p->S24.v);
    T6 = _mm_mul_epu32(H3, p->R20.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H4, p->S23.v);
    T6 = _mm_mul_epu32(H4, p->S24.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H1, p->R23.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H2, p->R22.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H3, p->R21.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H4, p->R20.v);
    T4 = _mm_add_epi64(T4, T5);
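    // H += [Mx, My] * [r^2, r^2]: split the first 32 bytes of the chunk into
    // 26-bit limbs, one block per lane.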
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
                            _mm_loadl_epi64((const xmmi *)(m + 16)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
                            _mm_loadl_epi64((const xmmi *)(m + 24)));
    M0 = _mm_and_si128(MMASK, T5);
    M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    M2 = _mm_and_si128(MMASK, T5);
    M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
    p = &st->P[1];
    T5 = _mm_mul_epu32(M0, p->R20.v);
    T6 = _mm_mul_epu32(M0, p->R21.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M1, p->S24.v);
    T6 = _mm_mul_epu32(M1, p->R20.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M2, p->S23.v);
    T6 = _mm_mul_epu32(M2, p->S24.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M3, p->S22.v);
    T6 = _mm_mul_epu32(M3, p->S23.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M4, p->S21.v);
    T6 = _mm_mul_epu32(M4, p->S22.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(M0, p->R22.v);
    T6 = _mm_mul_epu32(M0, p->R23.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M1, p->R21.v);
    T6 = _mm_mul_epu32(M1, p->R22.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M2, p->R20.v);
    T6 = _mm_mul_epu32(M2, p->R21.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M3, p->S24.v);
    T6 = _mm_mul_epu32(M3, p->R20.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M4, p->S23.v);
    T6 = _mm_mul_epu32(M4, p->S24.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(M0, p->R24.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M1, p->R23.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M2, p->R22.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M3, p->R21.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(M4, p->R20.v);
    T4 = _mm_add_epi64(T4, T5);
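    // H += [Mx, My]: the final 32 bytes of the chunk are added in directly.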
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 32)),
                            _mm_loadl_epi64((const xmmi *)(m + 48)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 40)),
                            _mm_loadl_epi64((const xmmi *)(m + 56)));
    M0 = _mm_and_si128(MMASK, T5);
    M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    M2 = _mm_and_si128(MMASK, T5);
    M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);
    T0 = _mm_add_epi64(T0, M0);
    T1 = _mm_add_epi64(T1, M1);
    T2 = _mm_add_epi64(T2, M2);
    T3 = _mm_add_epi64(T3, M3);
    T4 = _mm_add_epi64(T4, M4);
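    // Partial reduction: two interleaved carry chains; the carry out of the
    // top limb is multiplied by 5, since 2^130 = 5 (mod 2^130 - 5).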
    C1 = _mm_srli_epi64(T0, 26);
    C2 = _mm_srli_epi64(T3, 26);
    T0 = _mm_and_si128(T0, MMASK);
    T3 = _mm_and_si128(T3, MMASK);
    T1 = _mm_add_epi64(T1, C1);
    T4 = _mm_add_epi64(T4, C2);
    C1 = _mm_srli_epi64(T1, 26);
    C2 = _mm_srli_epi64(T4, 26);
    T1 = _mm_and_si128(T1, MMASK);
    T4 = _mm_and_si128(T4, MMASK);
    T2 = _mm_add_epi64(T2, C1);
    T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
    C1 = _mm_srli_epi64(T2, 26);
    C2 = _mm_srli_epi64(T0, 26);
    T2 = _mm_and_si128(T2, MMASK);
    T0 = _mm_and_si128(T0, MMASK);
    T3 = _mm_add_epi64(T3, C1);
    T1 = _mm_add_epi64(T1, C2);
    C1 = _mm_srli_epi64(T3, 26);
    T3 = _mm_and_si128(T3, MMASK);
    T4 = _mm_add_epi64(T4, C1);

    H0 = T0;
    H1 = T1;
    H2 = T2;
    H3 = T3;
    H4 = T4;

    m += 64;
    bytes -= 64;
  }

  st->H[0] = H0;
  st->H[1] = H1;
  st->H[2] = H2;
  st->H[3] = H3;
  st->H[4] = H4;
}
static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m,
                               size_t bytes) {
  const xmmi MMASK =
      _mm_load_si128((const xmmi *)poly1305_x64_sse2_message_mask);
  const xmmi HIBIT = _mm_load_si128((const xmmi *)poly1305_x64_sse2_1shl128);
  const xmmi FIVE = _mm_load_si128((const xmmi *)poly1305_x64_sse2_5);
  poly1305_power *p;
  xmmi H0, H1, H2, H3, H4;
  xmmi M0, M1, M2, M3, M4;
  xmmi T0, T1, T2, T3, T4, T5, T6;
  xmmi C1, C2;
  uint64_t r0, r1, r2;
  uint64_t t0, t1, t2, t3, t4, c;
  size_t consumed = 0;

  H0 = st->H[0];
  H1 = st->H[1];
  H2 = st->H[2];
  H3 = st->H[3];
  H4 = st->H[4];

  // p points at the [r^2, r^2] power pair
  p = &st->P[1];
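  // Absorb a trailing 32-byte chunk when present: T = H*[r^2, r^2] +
  // [Mx, My], then reduce and mark the 32 bytes consumed.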
  if (bytes >= 32) {
    T0 = _mm_mul_epu32(H0, p->R20.v);
    T1 = _mm_mul_epu32(H0, p->R21.v);
    T2 = _mm_mul_epu32(H0, p->R22.v);
    T3 = _mm_mul_epu32(H0, p->R23.v);
    T4 = _mm_mul_epu32(H0, p->R24.v);
    T5 = _mm_mul_epu32(H1, p->S24.v);
    T6 = _mm_mul_epu32(H1, p->R20.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H2, p->S23.v);
    T6 = _mm_mul_epu32(H2, p->S24.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H3, p->S22.v);
    T6 = _mm_mul_epu32(H3, p->S23.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H4, p->S21.v);
    T6 = _mm_mul_epu32(H4, p->S22.v);
    T0 = _mm_add_epi64(T0, T5);
    T1 = _mm_add_epi64(T1, T6);
    T5 = _mm_mul_epu32(H1, p->R21.v);
    T6 = _mm_mul_epu32(H1, p->R22.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H2, p->R20.v);
    T6 = _mm_mul_epu32(H2, p->R21.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H3, p->S24.v);
    T6 = _mm_mul_epu32(H3, p->R20.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H4, p->S23.v);
    T6 = _mm_mul_epu32(H4, p->S24.v);
    T2 = _mm_add_epi64(T2, T5);
    T3 = _mm_add_epi64(T3, T6);
    T5 = _mm_mul_epu32(H1, p->R23.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H2, p->R22.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H3, p->R21.v);
    T4 = _mm_add_epi64(T4, T5);
    T5 = _mm_mul_epu32(H4, p->R20.v);
    T4 = _mm_add_epi64(T4, T5);
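    // H += [Mx, My]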
    T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)),
                            _mm_loadl_epi64((const xmmi *)(m + 16)));
    T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)),
                            _mm_loadl_epi64((const xmmi *)(m + 24)));
    M0 = _mm_and_si128(MMASK, T5);
    M1 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    T5 = _mm_or_si128(_mm_srli_epi64(T5, 52), _mm_slli_epi64(T6, 12));
    M2 = _mm_and_si128(MMASK, T5);
    M3 = _mm_and_si128(MMASK, _mm_srli_epi64(T5, 26));
    M4 = _mm_or_si128(_mm_srli_epi64(T6, 40), HIBIT);

    T0 = _mm_add_epi64(T0, M0);
    T1 = _mm_add_epi64(T1, M1);
    T2 = _mm_add_epi64(T2, M2);
    T3 = _mm_add_epi64(T3, M3);
    T4 = _mm_add_epi64(T4, M4);
    C1 = _mm_srli_epi64(T0, 26);
    C2 = _mm_srli_epi64(T3, 26);
    T0 = _mm_and_si128(T0, MMASK);
    T3 = _mm_and_si128(T3, MMASK);
    T1 = _mm_add_epi64(T1, C1);
    T4 = _mm_add_epi64(T4, C2);
    C1 = _mm_srli_epi64(T1, 26);
    C2 = _mm_srli_epi64(T4, 26);
    T1 = _mm_and_si128(T1, MMASK);
    T4 = _mm_and_si128(T4, MMASK);
    T2 = _mm_add_epi64(T2, C1);
    T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
    C1 = _mm_srli_epi64(T2, 26);
    C2 = _mm_srli_epi64(T0, 26);
    T2 = _mm_and_si128(T2, MMASK);
    T0 = _mm_and_si128(T0, MMASK);
    T3 = _mm_add_epi64(T3, C1);
    T1 = _mm_add_epi64(T1, C2);
    C1 = _mm_srli_epi64(T3, 26);
    T3 = _mm_and_si128(T3, MMASK);
    T4 = _mm_add_epi64(T4, C1);

    H0 = T0;
    H1 = T1;
    H2 = T2;
    H3 = T3;
    H4 = T4;
    consumed = 32;
  }
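  // Finalize: H *= [r^2, r]. Lane y (d[2]) of each power is patched with the
  // limbs of r itself, so the last multiply weights lane x by r^2 and lane y
  // by r; summing the lanes afterwards gives the correctly weighted total.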
  r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1];
  r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1];
  r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1];
  p->R20.d[2] = (uint32_t)(r0)&0x3ffffff;
  p->R21.d[2] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff;
  p->R22.d[2] = (uint32_t)((r1 >> 8)) & 0x3ffffff;
  p->R23.d[2] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff;
  p->R24.d[2] = (uint32_t)((r2 >> 16));
  p->S21.d[2] = p->R21.d[2] * 5;
  p->S22.d[2] = p->R22.d[2] * 5;
  p->S23.d[2] = p->R23.d[2] * 5;
  p->S24.d[2] = p->R24.d[2] * 5;
  T0 = _mm_mul_epu32(H0, p->R20.v);
  T1 = _mm_mul_epu32(H0, p->R21.v);
  T2 = _mm_mul_epu32(H0, p->R22.v);
  T3 = _mm_mul_epu32(H0, p->R23.v);
  T4 = _mm_mul_epu32(H0, p->R24.v);
  T5 = _mm_mul_epu32(H1, p->S24.v);
  T6 = _mm_mul_epu32(H1, p->R20.v);
  T0 = _mm_add_epi64(T0, T5);
  T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H2, p->S23.v);
  T6 = _mm_mul_epu32(H2, p->S24.v);
  T0 = _mm_add_epi64(T0, T5);
  T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H3, p->S22.v);
  T6 = _mm_mul_epu32(H3, p->S23.v);
  T0 = _mm_add_epi64(T0, T5);
  T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H4, p->S21.v);
  T6 = _mm_mul_epu32(H4, p->S22.v);
  T0 = _mm_add_epi64(T0, T5);
  T1 = _mm_add_epi64(T1, T6);
  T5 = _mm_mul_epu32(H1, p->R21.v);
  T6 = _mm_mul_epu32(H1, p->R22.v);
  T2 = _mm_add_epi64(T2, T5);
  T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H2, p->R20.v);
  T6 = _mm_mul_epu32(H2, p->R21.v);
  T2 = _mm_add_epi64(T2, T5);
  T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H3, p->S24.v);
  T6 = _mm_mul_epu32(H3, p->R20.v);
  T2 = _mm_add_epi64(T2, T5);
  T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H4, p->S23.v);
  T6 = _mm_mul_epu32(H4, p->S24.v);
  T2 = _mm_add_epi64(T2, T5);
  T3 = _mm_add_epi64(T3, T6);
  T5 = _mm_mul_epu32(H1, p->R23.v);
  T4 = _mm_add_epi64(T4, T5);
  T5 = _mm_mul_epu32(H2, p->R22.v);
  T4 = _mm_add_epi64(T4, T5);
  T5 = _mm_mul_epu32(H3, p->R21.v);
  T4 = _mm_add_epi64(T4, T5);
  T5 = _mm_mul_epu32(H4, p->R20.v);
  T4 = _mm_add_epi64(T4, T5);
  C1 = _mm_srli_epi64(T0, 26);
  C2 = _mm_srli_epi64(T3, 26);
  T0 = _mm_and_si128(T0, MMASK);
  T3 = _mm_and_si128(T3, MMASK);
  T1 = _mm_add_epi64(T1, C1);
  T4 = _mm_add_epi64(T4, C2);
  C1 = _mm_srli_epi64(T1, 26);
  C2 = _mm_srli_epi64(T4, 26);
  T1 = _mm_and_si128(T1, MMASK);
  T4 = _mm_and_si128(T4, MMASK);
  T2 = _mm_add_epi64(T2, C1);
  T0 = _mm_add_epi64(T0, _mm_mul_epu32(C2, FIVE));
  C1 = _mm_srli_epi64(T2, 26);
  C2 = _mm_srli_epi64(T0, 26);
  T2 = _mm_and_si128(T2, MMASK);
  T0 = _mm_and_si128(T0, MMASK);
  T3 = _mm_add_epi64(T3, C1);
  T1 = _mm_add_epi64(T1, C2);
  C1 = _mm_srli_epi64(T3, 26);
  T3 = _mm_and_si128(T3, MMASK);
  T4 = _mm_add_epi64(T4, C1);
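  // H = H.x + H.y: add the high lane of each limb onto the low lane,
  // collapsing the two accumulators into one 130-bit value.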
  H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8));
  H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8));
  H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8));
  H3 = _mm_add_epi64(T3, _mm_srli_si128(T3, 8));
  H4 = _mm_add_epi64(T4, _mm_srli_si128(T4, 8));
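  // carry the folded sums across the 26-bit limbs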
  t0 = _mm_cvtsi128_si32(H0);
  c = (t0 >> 26);
  t0 &= 0x3ffffff;
  t1 = _mm_cvtsi128_si32(H1) + c;
  c = (t1 >> 26);
  t1 &= 0x3ffffff;
  t2 = _mm_cvtsi128_si32(H2) + c;
  c = (t2 >> 26);
  t2 &= 0x3ffffff;
  t3 = _mm_cvtsi128_si32(H3) + c;
  c = (t3 >> 26);
  t3 &= 0x3ffffff;
  t4 = _mm_cvtsi128_si32(H4) + c;
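  // Repack the five 26-bit limbs into the three 44-bit limbs (st->HH) that
  // the scalar finishing code consumes.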
  st->HH[0] = ((t0) | (t1 << 26)) & UINT64_C(0xfffffffffff);
  st->HH[1] = ((t1 >> 18) | (t2 << 8) | (t3 << 34)) & UINT64_C(0xfffffffffff);
  st->HH[2] = ((t3 >> 10) | (t4 << 16)) & UINT64_C(0x3ffffffffff);

  return consumed;
}
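// CRYPTO_poly1305_update buffers input until 32 bytes are available to seed
// the vector state with poly1305_first_block, then feeds 64-byte chunks to
// poly1305_blocks; trailing bytes accumulate in st->buffer.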
  if ((st->leftover == 0) && (bytes > 32)) {
    poly1305_first_block(st, m);
    m += 32;
    bytes -= 32;
  } else {
    want = poly1305_min(32 - st->leftover, bytes);
    OPENSSL_memcpy(st->buffer + st->leftover, m, want);
    bytes -= want;
    m += want;
    st->leftover += want;
    if ((st->leftover < 32) || (bytes == 0)) {
      return;
    }
    poly1305_first_block(st, st->buffer);
    st->leftover = 0;
  }

  if (st->leftover) {
    want = poly1305_min(64 - st->leftover, bytes);
    OPENSSL_memcpy(st->buffer + st->leftover, m, want);
    bytes -= want;
    m += want;
    st->leftover += want;
    if (st->leftover < 64) {
      return;
    }
    poly1305_blocks(st, st->buffer, 64);
    st->leftover = 0;
  }

  if (bytes >= 64) {
    want = (bytes & ~63);
    poly1305_blocks(st, m, want);
    m += want;
    bytes -= want;
  }

  if (bytes) {
    OPENSSL_memcpy(st->buffer + st->leftover, m, bytes);
    st->leftover += bytes;
  }
}
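// CRYPTO_poly1305_finish drains the vector accumulator through
// poly1305_combine, then runs the remaining buffered bytes through the
// scalar 44-bit-limb (donna) code below.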
  size_t leftover = st->leftover;
  uint8_t *m = st->buffer;
  if (st->started) {
    size_t consumed = poly1305_combine(st, m, leftover);
    leftover -= consumed;
    m += consumed;
  }
  if (leftover < 16) {
    goto poly1305_donna_atmost15bytes;
  }
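  // Scalar (donna) tail: h is read back from st->HH as 44/44/42-bit limbs
  // and r from st->P[1]; s1 = r1 * 20 and s2 = r2 * 20 fold the reduction
  // mod 2^130 - 5 into the 128-bit multiply-accumulates below.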
poly1305_donna_atleast16bytes:
  t0 = load_u64_le(m + 0);
  t1 = load_u64_le(m + 8);
  h0 += t0 & 0xfffffffffff;
  t0 = shr128_pair(t1, t0, 44);
  h1 += t0 & 0xfffffffffff;
  h2 += (t1 >> 24) | ((uint64_t)1 << 40);

poly1305_donna_mul:
  d[0] = add128(add128(mul64x64_128(h0, r0), mul64x64_128(h1, s2)),
                mul64x64_128(h2, s1));
  d[1] = add128(add128(mul64x64_128(h0, r1), mul64x64_128(h1, r0)),
                mul64x64_128(h2, s2));
  d[2] = add128(add128(mul64x64_128(h0, r2), mul64x64_128(h1, r1)),
                mul64x64_128(h2, r0));
  h0 = lo128(d[0]) & 0xfffffffffff;
  c = shr128(d[0], 44);
  d[1] = add128_64(d[1], c);
  h1 = lo128(d[1]) & 0xfffffffffff;
  c = shr128(d[1], 44);
  d[2] = add128_64(d[2], c);
  h2 = lo128(d[2]) & 0x3ffffffffff;
  c = shr128(d[2], 42);
  h0 += c * 5;

  m += 16;
  leftover -= 16;

  if (leftover >= 16) {
    goto poly1305_donna_atleast16bytes;
  }
poly1305_donna_atmost15bytes:
  if (!leftover) {
    goto poly1305_donna_finish;
  }
  t0 = load_u64_le(m + 0);
  t1 = load_u64_le(m + 8);
  h0 += t0 & 0xfffffffffff;
  t0 = shr128_pair(t1, t0, 44);
  h1 += t0 & 0xfffffffffff;
  // The final partial block was padded with a 1 byte then zeros, so the
  // high bit is already present and is not OR'd in here.
  h2 += t1 >> 24;

  goto poly1305_donna_mul;
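  // poly1305_donna_finish fully carries h, computes g = h + 5 - 2^130, and
  // derives the masks c/nc from g's sign bit so the selection below reduces
  // h mod p in constant time.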
poly1305_donna_finish:
  h0 = (h0 & nc) | (g0 & c);
  h1 = (h1 & nc) | (g1 & c);
  h2 = (h2 & nc) | (g2 & c);
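  // mac = (h + pad) % (2^128)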
  t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1];
  t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1];
  h0 += (t0 & 0xfffffffffff);
  c = (h0 >> 44);
  h0 &= 0xfffffffffff;
  t0 = shr128_pair(t1, t0, 44);
  h1 += (t0 & 0xfffffffffff) + c;
  c = (h1 >> 44);
  h1 &= 0xfffffffffff;
  t1 = (t1 >> 24);
  h2 += (t1 & 0x3ffffffffff) + c;
  h2 &= 0x3ffffffffff;
  store_u64_le(mac + 0, ((h0) | (h1 << 44)));
  store_u64_le(mac + 8, ((h1 >> 20) | (h2 << 24)));
}
#endif  // BORINGSSL_HAS_UINT128 && OPENSSL_X86_64