diff --git a/src/secp256k1/src/modinv32_impl.h b/src/secp256k1/src/modinv32_impl.h index 1da47bd22..aa7988c4b 100644 --- a/src/secp256k1/src/modinv32_impl.h +++ b/src/secp256k1/src/modinv32_impl.h @@ -1,538 +1,587 @@ /*********************************************************************** * Copyright (c) 2020 Peter Dettman * * Distributed under the MIT software license, see the accompanying * * file COPYING or https://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_MODINV32_IMPL_H #define SECP256K1_MODINV32_IMPL_H #include "modinv32.h" #include "util.h" #include <stdint.h> /* This file implements modular inversion based on the paper "Fast constant-time gcd computation and * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. * * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an * implementation for N=30, using 30-bit signed limbs represented as int32_t. */ #ifdef VERIFY static const secp256k1_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */ -static void secp256k1_modinv32_mul_30(secp256k1_modinv32_signed30 *r, const secp256k1_modinv32_signed30 *a, int32_t factor) { +static void secp256k1_modinv32_mul_30(secp256k1_modinv32_signed30 *r, const secp256k1_modinv32_signed30 *a, int alen, int32_t factor) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); int64_t c = 0; int i; for (i = 0; i < 8; ++i) { - c += (int64_t)a->v[i] * factor; + if (i < alen) c += (int64_t)a->v[i] * factor; r->v[i] = (int32_t)c & M30; c >>= 30; } - c += (int64_t)a->v[8] * factor; + if (8 < alen) c += (int64_t)a->v[8] * factor; VERIFY_CHECK(c == (int32_t)c); r->v[8] = (int32_t)c; } -/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. */ -static int secp256k1_modinv32_mul_cmp_30(const secp256k1_modinv32_signed30 *a, const secp256k1_modinv32_signed30 *b, int32_t factor) { +/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A consists of alen limbs; b has 9. */ +static int secp256k1_modinv32_mul_cmp_30(const secp256k1_modinv32_signed30 *a, int alen, const secp256k1_modinv32_signed30 *b, int32_t factor) { int i; secp256k1_modinv32_signed30 am, bm; - secp256k1_modinv32_mul_30(&am, a, 1); /* Normalize all but the top limb of a. */ - secp256k1_modinv32_mul_30(&bm, b, factor); + secp256k1_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + secp256k1_modinv32_mul_30(&bm, b, 9, factor); for (i = 0; i < 8; ++i) { /* Verify that all but the top limb of a and b are normalized. */ VERIFY_CHECK(am.v[i] >> 30 == 0); VERIFY_CHECK(bm.v[i] >> 30 == 0); } for (i = 8; i >= 0; --i) { if (am.v[i] < bm.v[i]) return -1; if (am.v[i] > bm.v[i]) return 1; } return 0; } #endif /* Take as input a signed30 number in range (-2*modulus,modulus), and add a multiple of the modulus * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the * process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range * [0,2^30). */ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int32_t sign, const secp256k1_modinv32_modinfo *modinfo) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4], r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8]; int32_t cond_add, cond_negate; #ifdef VERIFY /* Verify that all limbs are in range (-2^30,2^30).
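 * (Recall the representation: a signed30 value equals v[0] + v[1]*2^30 + ... +
 * v[8]*2^240. The extra bit of slack per limb, relative to the normalized
 * range [0,2^30), is what lets the conditional modulus additions below stay
 * within int32_t.)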
*/ int i; for (i = 0; i < 9; ++i) { VERIFY_CHECK(r->v[i] >= -M30); VERIFY_CHECK(r->v[i] <= M30); } - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input * limbs are in range (-2^30,2^30), this cannot overflow an int32_t. Note that the right * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is * indeed the behavior of the right shift operator). */ cond_add = r8 >> 31; r0 += modinfo->modulus.v[0] & cond_add; r1 += modinfo->modulus.v[1] & cond_add; r2 += modinfo->modulus.v[2] & cond_add; r3 += modinfo->modulus.v[3] & cond_add; r4 += modinfo->modulus.v[4] & cond_add; r5 += modinfo->modulus.v[5] & cond_add; r6 += modinfo->modulus.v[6] & cond_add; r7 += modinfo->modulus.v[7] & cond_add; r8 += modinfo->modulus.v[8] & cond_add; cond_negate = sign >> 31; r0 = (r0 ^ cond_negate) - cond_negate; r1 = (r1 ^ cond_negate) - cond_negate; r2 = (r2 ^ cond_negate) - cond_negate; r3 = (r3 ^ cond_negate) - cond_negate; r4 = (r4 ^ cond_negate) - cond_negate; r5 = (r5 ^ cond_negate) - cond_negate; r6 = (r6 ^ cond_negate) - cond_negate; r7 = (r7 ^ cond_negate) - cond_negate; r8 = (r8 ^ cond_negate) - cond_negate; /* Propagate the top bits, to bring limbs back to range (-2^30,2^30). */ r1 += r0 >> 30; r0 &= M30; r2 += r1 >> 30; r1 &= M30; r3 += r2 >> 30; r2 &= M30; r4 += r3 >> 30; r3 &= M30; r5 += r4 >> 30; r4 &= M30; r6 += r5 >> 30; r5 &= M30; r7 += r6 >> 30; r6 &= M30; r8 += r7 >> 30; r7 &= M30; /* In a second step add the modulus again if the result is still negative, bringing r to range * [0,modulus). */ cond_add = r8 >> 31; r0 += modinfo->modulus.v[0] & cond_add; r1 += modinfo->modulus.v[1] & cond_add; r2 += modinfo->modulus.v[2] & cond_add; r3 += modinfo->modulus.v[3] & cond_add; r4 += modinfo->modulus.v[4] & cond_add; r5 += modinfo->modulus.v[5] & cond_add; r6 += modinfo->modulus.v[6] & cond_add; r7 += modinfo->modulus.v[7] & cond_add; r8 += modinfo->modulus.v[8] & cond_add; /* And propagate again. */ r1 += r0 >> 30; r0 &= M30; r2 += r1 >> 30; r1 &= M30; r3 += r2 >> 30; r2 &= M30; r4 += r3 >> 30; r3 &= M30; r5 += r4 >> 30; r4 &= M30; r6 += r5 >> 30; r5 &= M30; r7 += r6 >> 30; r6 &= M30; r8 += r7 >> 30; r7 &= M30; r->v[0] = r0; r->v[1] = r1; r->v[2] = r2; r->v[3] = r3; r->v[4] = r4; r->v[5] = r5; r->v[6] = r6; r->v[7] = r7; r->v[8] = r8; #ifdef VERIFY VERIFY_CHECK(r0 >> 30 == 0); VERIFY_CHECK(r1 >> 30 == 0); VERIFY_CHECK(r2 >> 30 == 0); VERIFY_CHECK(r3 >> 30 == 0); VERIFY_CHECK(r4 >> 30 == 0); VERIFY_CHECK(r5 >> 30 == 0); VERIFY_CHECK(r6 >> 30 == 0); VERIFY_CHECK(r7 >> 30 == 0); VERIFY_CHECK(r8 >> 30 == 0); - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, &modinfo->modulus, 0) >= 0); /* r >= 0 */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif } /* Data type for transition matrices (see section 3 of explanation). 
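 * Applying t to the column vector (f, g) computes (u*f + v*g, q*f + r*g); the
 * update functions below then divide out the common 2^30 factor. In equation
 * form (a sketch of the semantics, not code from this file):
 *
 *     f' = (u*f + v*g) / 2^30
 *     g' = (q*f + r*g) / 2^30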
* * t = [ u v ] * [ q r ] */ typedef struct { int32_t u, v, q, r; } secp256k1_modinv32_trans2x2; /* Compute the transition matrix and eta for 30 divsteps. * * Input: eta: initial eta * f0: bottom limb of initial f * g0: bottom limb of initial g * Output: t: transition matrix * Return: final eta * * Implements the divsteps_n_matrix function from the explanation. */ static int32_t secp256k1_modinv32_divsteps_30(int32_t eta, uint32_t f0, uint32_t g0, secp256k1_modinv32_trans2x2 *t) { /* u,v,q,r are the elements of the transformation matrix being built up, * starting with the identity matrix. Semantically they are signed integers * in range [-2^30,2^30], but here represented as unsigned mod 2^32. This * permits left shifting (which is UB for negative numbers). The range * being inside [-2^31,2^31) means that casting to signed works correctly. */ uint32_t u = 1, v = 0, q = 0, r = 1; uint32_t c1, c2, f = f0, g = g0, x, y, z; int i; for (i = 0; i < 30; ++i) { VERIFY_CHECK((f & 1) == 1); /* f must always be odd */ VERIFY_CHECK((u * f0 + v * g0) == f << i); VERIFY_CHECK((q * f0 + r * g0) == g << i); /* Compute conditional masks for (eta < 0) and for (g & 1). */ c1 = eta >> 31; c2 = -(g & 1); /* Compute x,y,z, conditionally negated versions of f,u,v. */ x = (f ^ c1) - c1; y = (u ^ c1) - c1; z = (v ^ c1) - c1; /* Conditionally add x,y,z to g,q,r. */ g += x & c2; q += y & c2; r += z & c2; /* In what follows, c1 is a condition mask for (eta < 0) and (g & 1). */ c1 &= c2; /* Conditionally negate eta, and unconditionally subtract 1. */ eta = (eta ^ c1) - (c1 + 1); /* Conditionally add g,q,r to f,u,v. */ f += g & c1; u += q & c1; v += r & c1; /* Shifts */ g >>= 1; u <<= 1; v <<= 1; /* Bounds on eta that follow from the bounds on iteration count (max 25*30 divsteps). */ VERIFY_CHECK(eta >= -751 && eta <= 751); } /* Return data in t and return value. */ t->u = (int32_t)u; t->v = (int32_t)v; t->q = (int32_t)q; t->r = (int32_t)r; /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 30 of them will have determinant 2^30. */ VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30); return eta; } /* Compute the transition matrix and eta for 30 divsteps (variable time). * * Input: eta: initial eta * f0: bottom limb of initial f * g0: bottom limb of initial g * Output: t: transition matrix * Return: final eta * * Implements the divsteps_n_matrix_var function from the explanation. 
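 *
 * For reference, one divstep in plain branching code (an editor's sketch that
 * follows the definition in the explanation; this divstep helper is not part
 * of the library, which only applies divsteps 30 at a time via the matrix t).
 * If eta < 0 and g is odd, it swaps f and g, negating the new g and eta;
 * then, if g is odd, f (which is odd) is added to make it even; finally eta
 * is decremented and g halved:
 *
 *     static void divstep(int32_t *eta, int32_t *f, int32_t *g) {
 *         if (*eta < 0 && (*g & 1)) {
 *             int32_t tmp = *f;
 *             *eta = -*eta;
 *             *f = *g;
 *             *g = -tmp;
 *         }
 *         if (*g & 1) *g += *f;
 *         --*eta;
 *         *g >>= 1;
 *     }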
*/ static int32_t secp256k1_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, secp256k1_modinv32_trans2x2 *t) { /* inv256[i] = -(2*i+1)^-1 (mod 256) */ static const uint8_t inv256[128] = { 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, 0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31, 0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89, 0x07, 0x9D, 0x7B, 0x51, 0x4F, 0x25, 0x03, 0x99, 0x17, 0x2D, 0x0B, 0x61, 0x5F, 0xB5, 0x93, 0xA9, 0x27, 0xBD, 0x9B, 0x71, 0x6F, 0x45, 0x23, 0xB9, 0x37, 0x4D, 0x2B, 0x81, 0x7F, 0xD5, 0xB3, 0xC9, 0x47, 0xDD, 0xBB, 0x91, 0x8F, 0x65, 0x43, 0xD9, 0x57, 0x6D, 0x4B, 0xA1, 0x9F, 0xF5, 0xD3, 0xE9, 0x67, 0xFD, 0xDB, 0xB1, 0xAF, 0x85, 0x63, 0xF9, 0x77, 0x8D, 0x6B, 0xC1, 0xBF, 0x15, 0xF3, 0x09, 0x87, 0x1D, 0xFB, 0xD1, 0xCF, 0xA5, 0x83, 0x19, 0x97, 0xAD, 0x8B, 0xE1, 0xDF, 0x35, 0x13, 0x29, 0xA7, 0x3D, 0x1B, 0xF1, 0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01 }; /* Transformation matrix; see comments in secp256k1_modinv32_divsteps_30. */ uint32_t u = 1, v = 0, q = 0, r = 1; uint32_t f = f0, g = g0, m; uint16_t w; int i = 30, limit, zeros; for (;;) { /* Use a sentinel bit to count zeros only up to i. */ zeros = secp256k1_ctz32_var(g | (UINT32_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; v <<= zeros; eta -= zeros; i -= zeros; /* We're done once we've done 30 divsteps. */ if (i == 0) break; VERIFY_CHECK((f & 1) == 1); VERIFY_CHECK((g & 1) == 1); VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i)); VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i)); /* Bounds on eta that follow from the bounds on iteration count (max 25*30 divsteps). */ VERIFY_CHECK(eta >= -751 && eta <= 751); /* If eta is negative, negate it and replace f,g with g,-f. */ if (eta < 0) { uint32_t tmp; eta = -eta; tmp = f; f = g; g = -tmp; tmp = u; u = q; q = -tmp; tmp = v; v = r; r = -tmp; } /* eta is now >= 0. In what follows we're going to cancel out the bottom bits of g. No more * than i can be cancelled out (as we'd be done before that point), and no more than eta+1 * can be done as its sign will flip once that happens. */ limit = ((int)eta + 1) > i ? i : ((int)eta + 1); /* m is a mask for the bottom min(limit, 8) bits (our table only supports 8 bits). */ VERIFY_CHECK(limit > 0 && limit <= 30); m = (UINT32_MAX >> (32 - limit)) & 255U; /* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */ w = (g * inv256[(f >> 1) & 127]) & m; /* Do so. */ g += f * w; q += u * w; r += v * w; VERIFY_CHECK((g & m) == 0); } /* Return data in t and return value. */ t->u = (int32_t)u; t->v = (int32_t)v; t->q = (int32_t)q; t->r = (int32_t)r; /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 30 of them will have determinant 2^30. */ VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30); return eta; } /* Compute (t/2^30) * [d, e] mod modulus, where t is a transition matrix for 30 divsteps. * * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range * (-2^30,2^30). * * This implements the update_de function from the explanation. 
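 *
 * In equation form (an editor's sketch of the arithmetic, not additional API):
 *
 *     d' = (u*d + v*e + md*modulus) / 2^30
 *     e' = (q*d + r*e + me*modulus) / 2^30
 *
 * where md,me are chosen so that both numerators are multiples of 2^30 (making
 * the division exact), and adding multiples of the modulus does not change the
 * result mod modulus.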
*/ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp256k1_modinv32_signed30 *e, const secp256k1_modinv32_trans2x2 *t, const secp256k1_modinv32_modinfo* modinfo) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t di, ei, md, me, sd, se; int64_t cd, ce; int i; #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */ VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */ VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */ VERIFY_CHECK((labs(q) + labs(r)) <= M30 + 1); /* |q|+|r| <= 2^30 */ #endif /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ sd = d->v[8] >> 31; se = e->v[8] >> 31; md = (u & sd) + (v & se); me = (q & sd) + (r & se); /* Begin computing t*[d,e]. */ di = d->v[0]; ei = e->v[0]; cd = (int64_t)u * di + (int64_t)v * ei; ce = (int64_t)q * di + (int64_t)r * ei; /* Correct md,me so that t*[d,e]+modulus*[md,me] has 30 zero bottom bits. */ md -= (modinfo->modulus_inv30 * (uint32_t)cd + md) & M30; me -= (modinfo->modulus_inv30 * (uint32_t)ce + me) & M30; /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ cd += (int64_t)modinfo->modulus.v[0] * md; ce += (int64_t)modinfo->modulus.v[0] * me; /* Verify that the low 30 bits of the computation are indeed zero, and then throw them away. */ VERIFY_CHECK(((int32_t)cd & M30) == 0); cd >>= 30; VERIFY_CHECK(((int32_t)ce & M30) == 0); ce >>= 30; /* Now iteratively compute limb i=1..8 of t*[d,e]+modulus*[md,me], and store them in output * limb i-1 (shifting down by 30 bits). */ for (i = 1; i < 9; ++i) { di = d->v[i]; ei = e->v[i]; cd += (int64_t)u * di + (int64_t)v * ei; ce += (int64_t)q * di + (int64_t)r * ei; cd += (int64_t)modinfo->modulus.v[i] * md; ce += (int64_t)modinfo->modulus.v[i] * me; d->v[i - 1] = (int32_t)cd & M30; cd >>= 30; e->v[i - 1] = (int32_t)ce & M30; ce >>= 30; } /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. 
*/ d->v[8] = (int32_t)cd; e->v[8] = (int32_t)ce; #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ #endif } /* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. * * This implements the update_fg function from the explanation. */ static void secp256k1_modinv32_update_fg_30(secp256k1_modinv32_signed30 *f, secp256k1_modinv32_signed30 *g, const secp256k1_modinv32_trans2x2 *t) { const int32_t M30 = (int32_t)(UINT32_MAX >> 2); const int32_t u = t->u, v = t->v, q = t->q, r = t->r; int32_t fi, gi; int64_t cf, cg; int i; /* Start computing t*[f,g]. */ fi = f->v[0]; gi = g->v[0]; cf = (int64_t)u * fi + (int64_t)v * gi; cg = (int64_t)q * fi + (int64_t)r * gi; /* Verify that the bottom 30 bits of the result are zero, and then throw them away. */ VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; /* Now iteratively compute limb i=1..8 of t*[f,g], and store them in output limb i-1 (shifting * down by 30 bits). */ for (i = 1; i < 9; ++i) { fi = f->v[i]; gi = g->v[i]; cf += (int64_t)u * fi + (int64_t)v * gi; cg += (int64_t)q * fi + (int64_t)r * gi; f->v[i - 1] = (int32_t)cf & M30; cf >>= 30; g->v[i - 1] = (int32_t)cg & M30; cg >>= 30; } /* What remains is limb 9 of t*[f,g]; store it as output limb 8. */ f->v[8] = (int32_t)cf; g->v[8] = (int32_t)cg; } +/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. + * + * Version that operates on a variable number of limbs in f and g. + * + * This implements the update_fg function from the explanation. + */ +static void secp256k1_modinv32_update_fg_30_var(int len, secp256k1_modinv32_signed30 *f, secp256k1_modinv32_signed30 *g, const secp256k1_modinv32_trans2x2 *t) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t fi, gi; + int64_t cf, cg; + int i; + VERIFY_CHECK(len > 0); + /* Start computing t*[f,g]. */ + fi = f->v[0]; + gi = g->v[0]; + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + /* Verify that the bottom 30 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting + * down by 30 bits). */ + for (i = 1; i < len; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + f->v[i - 1] = (int32_t)cf & M30; cf >>= 30; + g->v[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1).
*/ + f->v[len - 1] = (int32_t)cf; + g->v[len - 1] = (int32_t)cg; +} + /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ secp256k1_modinv32_signed30 d = {{0}}; secp256k1_modinv32_signed30 e = {{1}}; secp256k1_modinv32_signed30 f = modinfo->modulus; secp256k1_modinv32_signed30 g = *x; int i; int32_t eta = -1; /* Do 25 iterations of 30 divsteps each = 750 divsteps. 724 suffices for 256-bit inputs. */ for (i = 0; i < 25; ++i) { /* Compute transition matrix and new eta after 30 divsteps. */ secp256k1_modinv32_trans2x2 t; eta = secp256k1_modinv32_divsteps_30(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif secp256k1_modinv32_update_fg_30(&f, &g, &t); #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } /* At this point sufficient iterations have been performed that g must have reached 0 * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g * values i.e. +/- 1, and d now contains +/- the modular inverse. 
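 * (A sketch of the argument from the explanation: every iteration preserves
 * d*x == f and e*x == g mod modulus, because update_de applies the same
 * matrix t as update_fg and the exact division by 2^30 acts as multiplication
 * by its modular inverse; with g == 0 and f == +/-1 this gives d*x == +/-1.)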
*/ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &SECP256K1_SIGNED30_ONE, 0) == 0); + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &SECP256K1_SIGNED30_ONE, -1) == 0 || - secp256k1_modinv32_mul_cmp_30(&f, &SECP256K1_SIGNED30_ONE, 1) == 0 || - (secp256k1_modinv32_mul_cmp_30(x, &SECP256K1_SIGNED30_ONE, 0) == 0 && - secp256k1_modinv32_mul_cmp_30(&d, &SECP256K1_SIGNED30_ONE, 0) == 0 && - (secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, 1) == 0 || - secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || + secp256k1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (secp256k1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || + secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo); *x = d; } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ secp256k1_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; secp256k1_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; secp256k1_modinv32_signed30 f = modinfo->modulus; secp256k1_modinv32_signed30 g = *x; #ifdef VERIFY int i = 0; #endif - int j; + int j, len = 9; int32_t eta = -1; - int32_t cond; + int32_t cond, fn, gn; /* Do iterations of 30 divsteps each until g=0. */ while (1) { /* Compute transition matrix and new eta after 30 divsteps. */ secp256k1_modinv32_trans2x2 t; eta = secp256k1_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif - secp256k1_modinv32_update_fg_30(&f, &g, &t); + secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t); /* If the bottom limb of g is 0, there is a chance g=0. */ if (g.v[0] == 0) { cond = 0; - /* Check if the other limbs are also 0. */ - for (j = 1; j < 9; ++j) { + /* Check if all other limbs are also 0. */ + for (j = 1; j < len; ++j) { cond |= g.v[j]; } /* If so, we're done. */ if (cond == 0) break; } + + /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. 
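+ * (For example: fn ^ (fn >> 31) is 0 exactly when fn is 0 or -1, since
+ * fn >> 31 is 0 for fn >= 0 and -1 for fn < 0, while ((int32_t)len - 2) >> 31
+ * is 0 only when len >= 2; so cond == 0 checks all three conditions at once.)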
*/ + fn = f.v[len - 1]; + gn = g.v[len - 1]; + cond = ((int32_t)len - 2) >> 31; + cond |= fn ^ (fn >> 31); + cond |= gn ^ (gn >> 31); + /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */ + if (cond == 0) { + f.v[len - 2] |= (uint32_t)fn << 30; + g.v[len - 2] |= (uint32_t)gn << 30; + --len; + } #ifdef VERIFY VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, &SECP256K1_SIGNED30_ONE, 0) == 0); + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, &SECP256K1_SIGNED30_ONE, -1) == 0 || - secp256k1_modinv32_mul_cmp_30(&f, &SECP256K1_SIGNED30_ONE, 1) == 0 || - (secp256k1_modinv32_mul_cmp_30(x, &SECP256K1_SIGNED30_ONE, 0) == 0 && - secp256k1_modinv32_mul_cmp_30(&d, &SECP256K1_SIGNED30_ONE, 0) == 0 && - (secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, 1) == 0 || - secp256k1_modinv32_mul_cmp_30(&f, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || + secp256k1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (secp256k1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || + secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. 
*/ - secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo); + secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo); *x = d; } #endif /* SECP256K1_MODINV32_IMPL_H */ diff --git a/src/secp256k1/src/modinv64_impl.h b/src/secp256k1/src/modinv64_impl.h index 15cda3d73..78505fa18 100644 --- a/src/secp256k1/src/modinv64_impl.h +++ b/src/secp256k1/src/modinv64_impl.h @@ -1,540 +1,589 @@ /*********************************************************************** * Copyright (c) 2020 Peter Dettman * * Distributed under the MIT software license, see the accompanying * * file COPYING or https://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_MODINV64_IMPL_H #define SECP256K1_MODINV64_IMPL_H #include "modinv64.h" #include "util.h" /* This file implements modular inversion based on the paper "Fast constant-time gcd computation and * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. * * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an * implementation for N=62, using 62-bit signed limbs represented as int64_t. */ #ifdef VERIFY /* Helper function to compute the absolute value of an int64_t. * (we don't use abs/labs/llabs as it depends on the int sizes). */ static int64_t secp256k1_modinv64_abs(int64_t v) { VERIFY_CHECK(v > INT64_MIN); if (v < 0) return -v; return v; } static const secp256k1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */ -static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int64_t factor) { +static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int alen, int64_t factor) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); int128_t c = 0; int i; for (i = 0; i < 4; ++i) { - c += (int128_t)a->v[i] * factor; + if (i < alen) c += (int128_t)a->v[i] * factor; r->v[i] = (int64_t)c & M62; c >>= 62; } - c += (int128_t)a->v[4] * factor; + if (4 < alen) c += (int128_t)a->v[4] * factor; VERIFY_CHECK(c == (int64_t)c); r->v[4] = (int64_t)c; } -/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. */ -static int secp256k1_modinv64_mul_cmp_62(const secp256k1_modinv64_signed62 *a, const secp256k1_modinv64_signed62 *b, int64_t factor) { +/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */ +static int secp256k1_modinv64_mul_cmp_62(const secp256k1_modinv64_signed62 *a, int alen, const secp256k1_modinv64_signed62 *b, int64_t factor) { int i; secp256k1_modinv64_signed62 am, bm; - secp256k1_modinv64_mul_62(&am, a, 1); /* Normalize all but the top limb of a. */ - secp256k1_modinv64_mul_62(&bm, b, factor); + secp256k1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + secp256k1_modinv64_mul_62(&bm, b, 5, factor); for (i = 0; i < 4; ++i) { /* Verify that all but the top limb of a and b are normalized. */ VERIFY_CHECK(am.v[i] >> 62 == 0); VERIFY_CHECK(bm.v[i] >> 62 == 0); } for (i = 4; i >= 0; --i) { if (am.v[i] < bm.v[i]) return -1; if (am.v[i] > bm.v[i]) return 1; } return 0; } #endif /* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range * [0,2^62).
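 *
 * The modulus additions and the negation below are branchless, using sign
 * masks (a minimal sketch of the idiom, for illustration only):
 *
 *     cond = x >> 63;           all ones if x < 0, all zeros otherwise
 *     y += m & cond;            adds m exactly when x was negative
 *     y = (y ^ cond) - cond;    negates y exactly when cond is all ones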
*/ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int64_t sign, const secp256k1_modinv64_modinfo *modinfo) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4]; int64_t cond_add, cond_negate; #ifdef VERIFY /* Verify that all limbs are in range (-2^62,2^62). */ int i; for (i = 0; i < 5; ++i) { VERIFY_CHECK(r->v[i] >= -M62); VERIFY_CHECK(r->v[i] <= M62); } - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is * indeed the behavior of the right shift operator). */ cond_add = r4 >> 63; r0 += modinfo->modulus.v[0] & cond_add; r1 += modinfo->modulus.v[1] & cond_add; r2 += modinfo->modulus.v[2] & cond_add; r3 += modinfo->modulus.v[3] & cond_add; r4 += modinfo->modulus.v[4] & cond_add; cond_negate = sign >> 63; r0 = (r0 ^ cond_negate) - cond_negate; r1 = (r1 ^ cond_negate) - cond_negate; r2 = (r2 ^ cond_negate) - cond_negate; r3 = (r3 ^ cond_negate) - cond_negate; r4 = (r4 ^ cond_negate) - cond_negate; /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */ r1 += r0 >> 62; r0 &= M62; r2 += r1 >> 62; r1 &= M62; r3 += r2 >> 62; r2 &= M62; r4 += r3 >> 62; r3 &= M62; /* In a second step add the modulus again if the result is still negative, bringing * r to range [0,modulus). */ cond_add = r4 >> 63; r0 += modinfo->modulus.v[0] & cond_add; r1 += modinfo->modulus.v[1] & cond_add; r2 += modinfo->modulus.v[2] & cond_add; r3 += modinfo->modulus.v[3] & cond_add; r4 += modinfo->modulus.v[4] & cond_add; /* And propagate again. */ r1 += r0 >> 62; r0 &= M62; r2 += r1 >> 62; r1 &= M62; r3 += r2 >> 62; r2 &= M62; r4 += r3 >> 62; r3 &= M62; r->v[0] = r0; r->v[1] = r1; r->v[2] = r2; r->v[3] = r3; r->v[4] = r4; #ifdef VERIFY VERIFY_CHECK(r0 >> 62 == 0); VERIFY_CHECK(r1 >> 62 == 0); VERIFY_CHECK(r2 >> 62 == 0); VERIFY_CHECK(r3 >> 62 == 0); VERIFY_CHECK(r4 >> 62 == 0); - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, &modinfo->modulus, 0) >= 0); /* r >= 0 */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, &modinfo->modulus, 1) < 0); /* r < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif } /* Data type for transition matrices (see section 3 of explanation). * * t = [ u v ] * [ q r ] */ typedef struct { int64_t u, v, q, r; } secp256k1_modinv64_trans2x2; /* Compute the transition matrix and eta for 62 divsteps. * * Input: eta: initial eta * f0: bottom limb of initial f * g0: bottom limb of initial g * Output: t: transition matrix * Return: final eta * * Implements the divsteps_n_matrix function from the explanation. 
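 *
 * (eta here is the negation of the delta variable from the paper, which is
 * why the callers seed it with eta = -1, corresponding to delta = 1.)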
*/ static int64_t secp256k1_modinv64_divsteps_62(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) { /* u,v,q,r are the elements of the transformation matrix being built up, * starting with the identity matrix. Semantically they are signed integers * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This * permits left shifting (which is UB for negative numbers). The range * being inside [-2^63,2^63) means that casting to signed works correctly. */ uint64_t u = 1, v = 0, q = 0, r = 1; uint64_t c1, c2, f = f0, g = g0, x, y, z; int i; for (i = 0; i < 62; ++i) { VERIFY_CHECK((f & 1) == 1); /* f must always be odd */ VERIFY_CHECK((u * f0 + v * g0) == f << i); VERIFY_CHECK((q * f0 + r * g0) == g << i); /* Compute conditional masks for (eta < 0) and for (g & 1). */ c1 = eta >> 63; c2 = -(g & 1); /* Compute x,y,z, conditionally negated versions of f,u,v. */ x = (f ^ c1) - c1; y = (u ^ c1) - c1; z = (v ^ c1) - c1; /* Conditionally add x,y,z to g,q,r. */ g += x & c2; q += y & c2; r += z & c2; /* In what follows, c1 is a condition mask for (eta < 0) and (g & 1). */ c1 &= c2; /* Conditionally negate eta, and unconditionally subtract 1. */ eta = (eta ^ c1) - (c1 + 1); /* Conditionally add g,q,r to f,u,v. */ f += g & c1; u += q & c1; v += r & c1; /* Shifts */ g >>= 1; u <<= 1; v <<= 1; /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */ VERIFY_CHECK(eta >= -745 && eta <= 745); } /* Return data in t and return value. */ t->u = (int64_t)u; t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 62 of them will have determinant 2^62. */ VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62); return eta; } /* Compute the transition matrix and eta for 62 divsteps (variable time). * * Input: eta: initial eta * f0: bottom limb of initial f * g0: bottom limb of initial g * Output: t: transition matrix * Return: final eta * * Implements the divsteps_n_matrix_var function from the explanation. */ static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) { /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_62. */ uint64_t u = 1, v = 0, q = 0, r = 1; uint64_t f = f0, g = g0, m; uint32_t w; int i = 62, limit, zeros; for (;;) { /* Use a sentinel bit to count zeros only up to i. */ zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i)); /* Perform zeros divsteps at once; they all just divide g by two. */ g >>= zeros; u <<= zeros; v <<= zeros; eta -= zeros; i -= zeros; /* We're done once we've done 62 divsteps. */ if (i == 0) break; VERIFY_CHECK((f & 1) == 1); VERIFY_CHECK((g & 1) == 1); VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i)); VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i)); /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */ VERIFY_CHECK(eta >= -745 && eta <= 745); /* If eta is negative, negate it and replace f,g with g,-f. */ if (eta < 0) { uint64_t tmp; eta = -eta; tmp = f; f = g; g = -tmp; tmp = u; u = q; q = -tmp; tmp = v; v = r; r = -tmp; /* Use a formula to cancel out up to 6 bits of g. 
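 * (Why the formula works, for illustration: f is odd, so f*f == 1 mod 8, and
 * one Newton step lifts this to f*(2 - f*f) being an inverse of f modulo 2^6;
 * hence w = f*g*(f*f - 2) == -g*f^-1 mod 2^6, so g + w*f clears the masked
 * bottom bits of g.)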
Also, no more than i can be cancelled * out (as we'd be done before that point), and no more than eta+1 can be done as its sign * will flip again once that happens. */ limit = ((int)eta + 1) > i ? i : ((int)eta + 1); VERIFY_CHECK(limit > 0 && limit <= 62); /* m is a mask for the bottom min(limit, 6) bits. */ m = (UINT64_MAX >> (64 - limit)) & 63U; /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6) * bits. */ w = (f * g * (f * f - 2)) & m; } else { /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as * eta tends to be smaller here. */ limit = ((int)eta + 1) > i ? i : ((int)eta + 1); VERIFY_CHECK(limit > 0 && limit <= 62); /* m is a mask for the bottom min(limit, 4) bits. */ m = (UINT64_MAX >> (64 - limit)) & 15U; /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4) * bits. */ w = f + (((f + 1) & 4) << 1); w = (-w * g) & m; } g += f * w; q += u * w; r += v * w; VERIFY_CHECK((g & m) == 0); } /* Return data in t and return value. */ t->u = (int64_t)u; t->v = (int64_t)v; t->q = (int64_t)q; t->r = (int64_t)r; /* The determinant of t must be a power of two. This guarantees that multiplication with t * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which * will be divided out again). As each divstep's individual matrix has determinant 2, the * aggregate of 62 of them will have determinant 2^62. */ VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62); return eta; } /* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix for 62 divsteps. * * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range * (-2^62,2^62). * * This implements the update_de function from the explanation.
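 *
 * (Same arithmetic as the 30-bit version: d' = (u*d + v*e + md*modulus) / 2^62
 * and e' = (q*d + r*e + me*modulus) / 2^62, with md,me chosen below to make
 * both divisions exact; an editor's sketch, not additional API.)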
*/ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp256k1_modinv64_signed62 *e, const secp256k1_modinv64_trans2x2 *t, const secp256k1_modinv64_modinfo* modinfo) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4]; const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4]; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; int64_t md, me, sd, se; int128_t cd, ce; #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */ VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */ VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */ VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */ #endif /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ sd = d4 >> 63; se = e4 >> 63; md = (u & sd) + (v & se); me = (q & sd) + (r & se); /* Begin computing t*[d,e]. */ cd = (int128_t)u * d0 + (int128_t)v * e0; ce = (int128_t)q * d0 + (int128_t)r * e0; /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */ md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62; me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62; /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ cd += (int128_t)modinfo->modulus.v[0] * md; ce += (int128_t)modinfo->modulus.v[0] * me; /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */ VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62; VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62; /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */ cd += (int128_t)u * d1 + (int128_t)v * e1; ce += (int128_t)q * d1 + (int128_t)r * e1; if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */ cd += (int128_t)modinfo->modulus.v[1] * md; ce += (int128_t)modinfo->modulus.v[1] * me; } d->v[0] = (int64_t)cd & M62; cd >>= 62; e->v[0] = (int64_t)ce & M62; ce >>= 62; /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */ cd += (int128_t)u * d2 + (int128_t)v * e2; ce += (int128_t)q * d2 + (int128_t)r * e2; if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */ cd += (int128_t)modinfo->modulus.v[2] * md; ce += (int128_t)modinfo->modulus.v[2] * me; } d->v[1] = (int64_t)cd & M62; cd >>= 62; e->v[1] = (int64_t)ce & M62; ce >>= 62; /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. 
*/ cd += (int128_t)u * d3 + (int128_t)v * e3; ce += (int128_t)q * d3 + (int128_t)r * e3; if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */ cd += (int128_t)modinfo->modulus.v[3] * md; ce += (int128_t)modinfo->modulus.v[3] * me; } d->v[2] = (int64_t)cd & M62; cd >>= 62; e->v[2] = (int64_t)ce & M62; ce >>= 62; /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */ cd += (int128_t)u * d4 + (int128_t)v * e4; ce += (int128_t)q * d4 + (int128_t)r * e4; cd += (int128_t)modinfo->modulus.v[4] * md; ce += (int128_t)modinfo->modulus.v[4] * me; d->v[3] = (int64_t)cd & M62; cd >>= 62; e->v[3] = (int64_t)ce & M62; ce >>= 62; /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */ d->v[4] = (int64_t)cd; e->v[4] = (int64_t)ce; #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, &modinfo->modulus, 1) < 0); /* d < modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */ #endif } /* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps. * * This implements the update_fg function from the explanation. */ static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) { const int64_t M62 = (int64_t)(UINT64_MAX >> 2); const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4]; const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4]; const int64_t u = t->u, v = t->v, q = t->q, r = t->r; int128_t cf, cg; /* Start computing t*[f,g]. */ cf = (int128_t)u * f0 + (int128_t)v * g0; cg = (int128_t)q * f0 + (int128_t)r * g0; /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */ cf += (int128_t)u * f1 + (int128_t)v * g1; cg += (int128_t)q * f1 + (int128_t)r * g1; f->v[0] = (int64_t)cf & M62; cf >>= 62; g->v[0] = (int64_t)cg & M62; cg >>= 62; /* Compute limb 2 of t*[f,g], and store it as output limb 1. */ cf += (int128_t)u * f2 + (int128_t)v * g2; cg += (int128_t)q * f2 + (int128_t)r * g2; f->v[1] = (int64_t)cf & M62; cf >>= 62; g->v[1] = (int64_t)cg & M62; cg >>= 62; /* Compute limb 3 of t*[f,g], and store it as output limb 2. */ cf += (int128_t)u * f3 + (int128_t)v * g3; cg += (int128_t)q * f3 + (int128_t)r * g3; f->v[2] = (int64_t)cf & M62; cf >>= 62; g->v[2] = (int64_t)cg & M62; cg >>= 62; /* Compute limb 4 of t*[f,g], and store it as output limb 3. */ cf += (int128_t)u * f4 + (int128_t)v * g4; cg += (int128_t)q * f4 + (int128_t)r * g4; f->v[3] = (int64_t)cf & M62; cf >>= 62; g->v[3] = (int64_t)cg & M62; cg >>= 62; /* What remains is limb 5 of t*[f,g]; store it as output limb 4. 
*/ f->v[4] = (int64_t)cf; g->v[4] = (int64_t)cg; } +/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps. + * + * Version that operates on a variable number of limbs in f and g. + * + * This implements the update_fg function from the explanation. + */ +static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t u = t->u, v = t->v, q = t->q, r = t->r; + int64_t fi, gi; + int128_t cf, cg; + int i; + VERIFY_CHECK(len > 0); + /* Start computing t*[f,g]. */ + fi = f->v[0]; + gi = g->v[0]; + cf = (int128_t)u * fi + (int128_t)v * gi; + cg = (int128_t)q * fi + (int128_t)r * gi; + /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting + * down by 62 bits). */ + for (i = 1; i < len; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int128_t)u * fi + (int128_t)v * gi; + cg += (int128_t)q * fi + (int128_t)r * gi; + f->v[i - 1] = (int64_t)cf & M62; cf >>= 62; + g->v[i - 1] = (int64_t)cg & M62; cg >>= 62; + } + /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ + f->v[len - 1] = (int64_t)cf; + g->v[len - 1] = (int64_t)cg; +} + /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; secp256k1_modinv64_signed62 f = modinfo->modulus; secp256k1_modinv64_signed62 g = *x; int i; int64_t eta = -1; /* Do 12 iterations of 62 divsteps each = 744 divsteps. 724 suffices for 256-bit inputs. */ for (i = 0; i < 12; ++i) { /* Compute transition matrix and new eta after 62 divsteps. */ secp256k1_modinv64_trans2x2 t; eta = secp256k1_modinv64_divsteps_62(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. 
*/ #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif secp256k1_modinv64_update_fg_62(&f, &g, &t); #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } /* At this point sufficient iterations have been performed that g must have reached 0 * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g * values i.e. +/- 1, and d now contains +/- the modular inverse. */ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &SECP256K1_SIGNED62_ONE, 0) == 0); + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &SECP256K1_SIGNED62_ONE, -1) == 0 || - secp256k1_modinv64_mul_cmp_62(&f, &SECP256K1_SIGNED62_ONE, 1) == 0 || - (secp256k1_modinv64_mul_cmp_62(x, &SECP256K1_SIGNED62_ONE, 0) == 0 && - secp256k1_modinv64_mul_cmp_62(&d, &SECP256K1_SIGNED62_ONE, 0) == 0 && - (secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, 1) == 0 || - secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || + secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 || + secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo); *x = d; } /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) { /* Start with d=0, e=1, f=modulus, g=x, eta=-1. 
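 * (These seeds satisfy the invariants d*x == f and e*x == g mod modulus from
 * the start: 0*x == modulus == 0 and 1*x == x; eta = -1 corresponds to
 * delta = 1 in the paper.)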
*/ secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; secp256k1_modinv64_signed62 f = modinfo->modulus; secp256k1_modinv64_signed62 g = *x; - int j; #ifdef VERIFY int i = 0; #endif + int j, len = 5; int64_t eta = -1; - int64_t cond; + int64_t cond, fn, gn; /* Do iterations of 62 divsteps each until g=0. */ while (1) { /* Compute transition matrix and new eta after 62 divsteps. */ secp256k1_modinv64_trans2x2 t; eta = secp256k1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); /* Update d,e using that transition matrix. */ secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo); /* Update f,g using that transition matrix. */ #ifdef VERIFY - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif - secp256k1_modinv64_update_fg_62(&f, &g, &t); + secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t); /* If the bottom limb of g is zero, there is a chance that g=0. */ if (g.v[0] == 0) { cond = 0; /* Check if the other limbs are also 0. */ - for (j = 1; j < 5; ++j) { + for (j = 1; j < len; ++j) { cond |= g.v[j]; } /* If so, we're done. */ if (cond == 0) break; } + + /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */ + fn = f.v[len - 1]; + gn = g.v[len - 1]; + cond = ((int64_t)len - 2) >> 63; + cond |= fn ^ (fn >> 63); + cond |= gn ^ (gn >> 63); + /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */ + if (cond == 0) { + f.v[len - 2] |= (uint64_t)fn << 62; + g.v[len - 2] |= (uint64_t)gn << 62; + --len; + } #ifdef VERIFY VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, -1) > 0); /* f > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, 1) <= 0); /* f <= modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, -1) > 0); /* g > -modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &modinfo->modulus, 1) < 0); /* g < modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ #endif } /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ #ifdef VERIFY /* g == 0 */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, &SECP256K1_SIGNED62_ONE, 0) == 0); + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, &SECP256K1_SIGNED62_ONE, -1) == 0 || - secp256k1_modinv64_mul_cmp_62(&f, &SECP256K1_SIGNED62_ONE, 1) == 0 || - (secp256k1_modinv64_mul_cmp_62(x, &SECP256K1_SIGNED62_ONE, 0) == 0 && - secp256k1_modinv64_mul_cmp_62(&d, &SECP256K1_SIGNED62_ONE, 0) == 0 && - (secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, 1) == 0 || - secp256k1_modinv64_mul_cmp_62(&f, &modinfo->modulus, -1) == 0))); + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || + secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || + (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && + (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 || + secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0))); #endif /* Optionally negate d, normalize to [0,modulus), and return it. */ - secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo); + secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo); *x = d; } #endif /* SECP256K1_MODINV64_IMPL_H */