/root/bitcoin/src/secp256k1/src/modinv64_impl.h
| Line | Count | Source | 
| 1 |  | /*********************************************************************** | 
| 2 |  |  * Copyright (c) 2020 Peter Dettman                                    * | 
| 3 |  |  * Distributed under the MIT software license, see the accompanying    * | 
| 4 |  |  * file COPYING or https://www.opensource.org/licenses/mit-license.php.* | 
| 5 |  |  **********************************************************************/ | 
| 6 |  |  | 
| 7 |  | #ifndef SECP256K1_MODINV64_IMPL_H | 
| 8 |  | #define SECP256K1_MODINV64_IMPL_H | 
| 9 |  |  | 
| 10 |  | #include "int128.h" | 
| 11 |  | #include "modinv64.h" | 
| 12 |  |  | 
| 13 |  | /* This file implements modular inversion based on the paper "Fast constant-time gcd computation and | 
| 14 |  |  * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. | 
| 15 |  |  * | 
| 16 |  |  * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an | 
| 17 |  |  * implementation for N=62, using 62-bit signed limbs represented as int64_t. | 
| 18 |  |  */ | 
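The signed62 representation (declared in modinv64.h) stores a value as five signed 62-bit limbs: value = v[0] + 2^62·v[1] + 2^124·v[2] + 2^186·v[3] + 2^248·v[4]. A minimal sketch of the layout (not part of the library), using a toy value that spans two limbs:

```c
/* Sketch only: a struct mirroring secp256k1_modinv64_signed62, showing how a
 * value splits into 62-bit limbs and reassembles. */
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t v[5]; } signed62;

int main(void) {
    const uint64_t M62 = UINT64_MAX >> 2;    /* mask for the low 62 bits */
    uint64_t x = 0xfedcba9876543210u;
    signed62 s = {{ (int64_t)(x & M62), (int64_t)(x >> 62), 0, 0, 0 }};
    uint64_t back = (uint64_t)s.v[0] | ((uint64_t)s.v[1] << 62);
    printf("%d\n", back == x);               /* prints 1 */
    return 0;
}
```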
| 19 |  |  | 
| 20 |  | /* Data type for transition matrices (see section 3 of explanation). | 
| 21 |  |  * | 
| 22 |  |  * t = [ u  v ] | 
| 23 |  |  *     [ q  r ] | 
| 24 |  |  */ | 
| 25 |  | typedef struct { | 
| 26 |  |     int64_t u, v, q, r; | 
| 27 |  | } secp256k1_modinv64_trans2x2; | 
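The key invariant on such a matrix is its determinant: each individual divstep matrix has determinant ±2, so a batch of n divsteps has determinant ±2^n (2^65 for divsteps_59 below, which starts from 8·identity). A sketch of that check, assuming a compiler with __int128 (the library uses its own int128 abstraction instead):

```c
/* Sketch: verify det(t) = u*r - v*q is +-2^n, the invariant that VERIFY mode
 * checks via secp256k1_modinv64_det_check_pow2. */
#include <stdint.h>

static int det_is_pow2(int64_t u, int64_t v, int64_t q, int64_t r, unsigned n) {
    __int128 det = (__int128)u * r - (__int128)v * q;
    return det == ((__int128)1 << n) || det == -((__int128)1 << n);
}
```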
| 28 |  |  | 
| 29 |  | #ifdef VERIFY | 
| 30 |  | /* Helper function to compute the absolute value of an int64_t. | 
| 31 |  |  * (we don't use abs/labs/llabs as those depend on the int sizes). */ | 
| 32 |  | static int64_t secp256k1_modinv64_abs(int64_t v) { | 
| 33 |  |     VERIFY_CHECK(v > INT64_MIN); | 
| 34 |  |     if (v < 0) return -v; | 
| 35 |  |     return v; | 
| 36 |  | } | 
| 37 |  |  | 
| 38 |  | static const secp256k1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; | 
| 39 |  |  | 
| 40 |  | /* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */ | 
| 41 |  | static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int alen, int64_t factor) { | 
| 42 |  |     const uint64_t M62 = UINT64_MAX >> 2; | 
| 43 |  |     secp256k1_int128 c, d; | 
| 44 |  |     int i; | 
| 45 |  |     secp256k1_i128_from_i64(&c, 0); | 
| 46 |  |     for (i = 0; i < 4; ++i) { | 
| 47 |  |         if (i < alen) secp256k1_i128_accum_mul(&c, a->v[i], factor); | 
| 48 |  |         r->v[i] = secp256k1_i128_to_u64(&c) & M62; secp256k1_i128_rshift(&c, 62); | 
| 49 |  |     } | 
| 50 |  |     if (4 < alen) secp256k1_i128_accum_mul(&c, a->v[4], factor); | 
| 51 |  |     secp256k1_i128_from_i64(&d, secp256k1_i128_to_i64(&c)); | 
| 52 |  |     VERIFY_CHECK(secp256k1_i128_eq_var(&c, &d)); | 
| 53 |  |     r->v[4] = secp256k1_i128_to_i64(&c); | 
| 54 |  | } | 
| 55 |  |  | 
| 56 |  | /* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. Here a has alen limbs; b has 5. */ | 
| 57 |  | static int secp256k1_modinv64_mul_cmp_62(const secp256k1_modinv64_signed62 *a, int alen, const secp256k1_modinv64_signed62 *b, int64_t factor) { | 
| 58 |  |     int i; | 
| 59 |  |     secp256k1_modinv64_signed62 am, bm; | 
| 60 |  |     secp256k1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ | 
| 61 |  |     secp256k1_modinv64_mul_62(&bm, b, 5, factor); | 
| 62 |  |     for (i = 0; i < 4; ++i) { | 
| 63 |  |         /* Verify that all but the top limb of a and b are normalized. */ | 
| 64 |  |         VERIFY_CHECK(am.v[i] >> 62 == 0); | 
| 65 |  |         VERIFY_CHECK(bm.v[i] >> 62 == 0); | 
| 66 |  |     } | 
| 67 |  |     for (i = 4; i >= 0; --i) { | 
| 68 |  |         if (am.v[i] < bm.v[i]) return -1; | 
| 69 |  |         if (am.v[i] > bm.v[i]) return 1; | 
| 70 |  |     } | 
| 71 |  |     return 0; | 
| 72 |  | } | 
| 73 |  |  | 
| 74 |  | /* Check if the determinant of t is equal to 1 << n. If abs, check if |det t| == 1 << n. */ | 
| 75 |  | static int secp256k1_modinv64_det_check_pow2(const secp256k1_modinv64_trans2x2 *t, unsigned int n, int abs) { | 
| 76 |  |     secp256k1_int128 a; | 
| 77 |  |     secp256k1_i128_det(&a, t->u, t->v, t->q, t->r); | 
| 78 |  |     if (secp256k1_i128_check_pow2(&a, n, 1)) return 1; | 
| 79 |  |     if (abs && secp256k1_i128_check_pow2(&a, n, -1)) return 1; | 
| 80 |  |     return 0; | 
| 81 |  | } | 
| 82 |  | #endif | 
| 83 |  |  | 
| 84 |  | /* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus | 
| 85 |  |  * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the | 
| 86 |  |  * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range | 
| 87 |  |  * [0,2^62). */ | 
| 88 | 0 | static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int64_t sign, const secp256k1_modinv64_modinfo *modinfo) { | 
| 89 | 0 |     const int64_t M62 = (int64_t)(UINT64_MAX >> 2); | 
| 90 | 0 |     int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4]; | 
| 91 | 0 |     volatile int64_t cond_add, cond_negate; | 
| 92 |  |  | 
| 93 |  | #ifdef VERIFY | 
| 94 |  |     /* Verify that all limbs are in range (-2^62,2^62). */ | 
| 95 |  |     int i; | 
| 96 |  |     for (i = 0; i < 5; ++i) { | 
| 97 |  |         VERIFY_CHECK(r->v[i] >= -M62); | 
| 98 |  |         VERIFY_CHECK(r->v[i] <= M62); | 
| 99 |  |     } | 
| 100 |  |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ | 
| 101 |  |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ | 
| 102 |  | #endif | 
| 103 |  |  | 
| 104 |  |     /* In a first step, add the modulus if the input is negative, and then negate if requested. | 
| 105 |  |      * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input | 
| 106 |  |      * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right | 
| 107 |  |      * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is | 
| 108 |  |      * indeed the behavior of the right shift operator). */ | 
| 109 | 0 |     cond_add = r4 >> 63; | 
| 110 | 0 |     r0 += modinfo->modulus.v[0] & cond_add; | 
| 111 | 0 |     r1 += modinfo->modulus.v[1] & cond_add; | 
| 112 | 0 |     r2 += modinfo->modulus.v[2] & cond_add; | 
| 113 | 0 |     r3 += modinfo->modulus.v[3] & cond_add; | 
| 114 | 0 |     r4 += modinfo->modulus.v[4] & cond_add; | 
| 115 | 0 |     cond_negate = sign >> 63; | 
| 116 | 0 |     r0 = (r0 ^ cond_negate) - cond_negate; | 
| 117 | 0 |     r1 = (r1 ^ cond_negate) - cond_negate; | 
| 118 | 0 |     r2 = (r2 ^ cond_negate) - cond_negate; | 
| 119 | 0 |     r3 = (r3 ^ cond_negate) - cond_negate; | 
| 120 | 0 |     r4 = (r4 ^ cond_negate) - cond_negate; | 
| 121 |  |     /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */ | 
| 122 | 0 |     r1 += r0 >> 62; r0 &= M62; | 
| 123 | 0 |     r2 += r1 >> 62; r1 &= M62; | 
| 124 | 0 |     r3 += r2 >> 62; r2 &= M62; | 
| 125 | 0 |     r4 += r3 >> 62; r3 &= M62; | 
| 126 |  |  | 
| 127 |  |     /* In a second step add the modulus again if the result is still negative, bringing | 
| 128 |  |      * r to range [0,modulus). */ | 
| 129 | 0 |     cond_add = r4 >> 63; | 
| 130 | 0 |     r0 += modinfo->modulus.v[0] & cond_add; | 
| 131 | 0 |     r1 += modinfo->modulus.v[1] & cond_add; | 
| 132 | 0 |     r2 += modinfo->modulus.v[2] & cond_add; | 
| 133 | 0 |     r3 += modinfo->modulus.v[3] & cond_add; | 
| 134 | 0 |     r4 += modinfo->modulus.v[4] & cond_add; | 
| 135 |  |     /* And propagate again. */ | 
| 136 | 0 |     r1 += r0 >> 62; r0 &= M62; | 
| 137 | 0 |     r2 += r1 >> 62; r1 &= M62; | 
| 138 | 0 |     r3 += r2 >> 62; r2 &= M62; | 
| 139 | 0 |     r4 += r3 >> 62; r3 &= M62; | 
| 140 |  |  | 
| 141 | 0 |     r->v[0] = r0; | 
| 142 | 0 |     r->v[1] = r1; | 
| 143 | 0 |     r->v[2] = r2; | 
| 144 | 0 |     r->v[3] = r3; | 
| 145 | 0 |     r->v[4] = r4; | 
| 146 |  |  | 
| 147 | 0 |     VERIFY_CHECK(r0 >> 62 == 0); | 
| 148 | 0 |     VERIFY_CHECK(r1 >> 62 == 0); | 
| 149 | 0 |     VERIFY_CHECK(r2 >> 62 == 0); | 
| 150 | 0 |     VERIFY_CHECK(r3 >> 62 == 0); | 
| 151 | 0 |     VERIFY_CHECK(r4 >> 62 == 0); | 
| 152 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ | 
| 153 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ | 
| 154 | 0 | } | 
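The function above relies on two branchless idioms: adding m only when a mask is all-ones (x + (m & mask)), and two's-complement conditional negation ((x ^ mask) - mask, which equals ~x + 1 = -x when mask is -1). A small self-check sketch:

```c
/* Sketch of the branchless conditionals used in normalize_62: when mask is 0
 * every operation is a no-op; when mask is -1 the add/negate takes effect. */
#include <stdint.h>
#include <assert.h>

int main(void) {
    int64_t x = 123, m = 1000;
    int64_t mask = -1;                   /* condition true: all bits set */
    assert(((x ^ mask) - mask) == -x);   /* conditional negation */
    assert((x + (m & mask)) == x + m);   /* conditional addition */
    mask = 0;                            /* condition false */
    assert(((x ^ mask) - mask) == x);
    assert((x + (m & mask)) == x);
    return 0;
}
```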
| 155 |  |  | 
| 156 |  | /* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)). | 
| 157 |  |  * Note that the transformation matrix is scaled by 2^62 and not 2^59. | 
| 158 |  |  * | 
| 159 |  |  * Input:  zeta: initial zeta | 
| 160 |  |  *         f0:   bottom limb of initial f | 
| 161 |  |  *         g0:   bottom limb of initial g | 
| 162 |  |  * Output: t: transition matrix | 
| 163 |  |  * Return: final zeta | 
| 164 |  |  * | 
| 165 |  |  * Implements the divsteps_n_matrix function from the explanation. | 
| 166 |  |  */ | 
| 167 | 0 | static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) { | 
| 168 |  |     /* u,v,q,r are the elements of the transformation matrix being built up, | 
| 169 |  |      * starting with the identity matrix times 8 (because the caller expects | 
| 170 |  |      * a result scaled by 2^62). Semantically they are signed integers | 
| 171 |  |      * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This | 
| 172 |  |      * permits left shifting (which is UB for negative numbers). The range | 
| 173 |  |      * being inside [-2^63,2^63) means that casting to signed works correctly. | 
| 174 |  |      */ | 
| 175 | 0 |     uint64_t u = 8, v = 0, q = 0, r = 8; | 
| 176 | 0 |     volatile uint64_t c1, c2; | 
| 177 | 0 |     uint64_t mask1, mask2, f = f0, g = g0, x, y, z; | 
| 178 | 0 |     int i; | 
| 179 |  |  | 
| 180 | 0 |     for (i = 3; i < 62; ++i) { | 
| 181 | 0 |         VERIFY_CHECK((f & 1) == 1); /* f must always be odd */ | 
| 182 | 0 |         VERIFY_CHECK((u * f0 + v * g0) == f << i); | 
| 183 | 0 |         VERIFY_CHECK((q * f0 + r * g0) == g << i); | 
| 184 |  |         /* Compute conditional masks for (zeta < 0) and for (g & 1). */ | 
| 185 | 0 |         c1 = zeta >> 63; | 
| 186 | 0 |         mask1 = c1; | 
| 187 | 0 |         c2 = g & 1; | 
| 188 | 0 |         mask2 = -c2; | 
| 189 |  |         /* Compute x,y,z, conditionally negated versions of f,u,v. */ | 
| 190 | 0 |         x = (f ^ mask1) - mask1; | 
| 191 | 0 |         y = (u ^ mask1) - mask1; | 
| 192 | 0 |         z = (v ^ mask1) - mask1; | 
| 193 |  |         /* Conditionally add x,y,z to g,q,r. */ | 
| 194 | 0 |         g += x & mask2; | 
| 195 | 0 |         q += y & mask2; | 
| 196 | 0 |         r += z & mask2; | 
| 197 |  |         /* In what follows, mask1 is a condition mask for (zeta < 0) and (g & 1). */ | 
| 198 | 0 |         mask1 &= mask2; | 
| 199 |  |         /* Conditionally change zeta into -zeta-2 or zeta-1. */ | 
| 200 | 0 |         zeta = (zeta ^ mask1) - 1; | 
| 201 |  |         /* Conditionally add g,q,r to f,u,v. */ | 
| 202 | 0 |         f += g & mask1; | 
| 203 | 0 |         u += q & mask1; | 
| 204 | 0 |         v += r & mask1; | 
| 205 |  |         /* Shifts */ | 
| 206 | 0 |         g >>= 1; | 
| 207 | 0 |         u <<= 1; | 
| 208 | 0 |         v <<= 1; | 
| 209 |  |         /* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */ | 
| 210 | 0 |         VERIFY_CHECK(zeta >= -591 && zeta <= 591); | 
| 211 | 0 |     } | 
| 212 |  |     /* Return data in t and return value. */ | 
| 213 | 0 |     t->u = (int64_t)u; | 
| 214 | 0 |     t->v = (int64_t)v; | 
| 215 | 0 |     t->q = (int64_t)q; | 
| 216 | 0 |     t->r = (int64_t)r; | 
| 217 |  |  | 
| 218 |  |     /* The determinant of t must be a power of two. This guarantees that multiplication with t | 
| 219 |  |      * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which | 
| 220 |  |      * will be divided out again). As each divstep's individual matrix has determinant 2, the | 
| 221 |  |      * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial | 
| 222 |  |  * 8*identity (which has determinant 2^6) means the overall output has determinant | 
| 223 |  |      * 2^65. */ | 
| 224 | 0 |     VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0)); | 
| 225 |  |  | 
| 226 | 0 |     return zeta; | 
| 227 | 0 | } | 
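For reference, the single divstep that this function batches 59 of can be transcribed directly from doc/safegcd_implementation.md (in delta form; the code above uses zeta = -(delta+1/2)). A full-width sketch, not the limb-based production form:

```c
#include <stdint.h>

/* One divstep on full-width values; f must be odd on entry and stays odd. */
static void divstep(int64_t *delta, int64_t *f, int64_t *g) {
    if (*delta > 0 && (*g & 1)) {
        int64_t tmp = *f;
        *delta = 1 - *delta;
        *f = *g;
        *g = (*g - tmp) / 2;   /* exact: f and g are both odd, so g - f is even */
    } else if (*g & 1) {
        *delta = 1 + *delta;
        *g = (*g + *f) / 2;    /* exact: g + f is even */
    } else {
        *delta = 1 + *delta;
        *g = *g / 2;
    }
}
```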
| 228 |  |  | 
| 229 |  | /* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta). | 
| 230 |  |  * | 
| 231 |  |  * Input:  eta: initial eta | 
| 232 |  |  *         f0:  bottom limb of initial f | 
| 233 |  |  *         g0:  bottom limb of initial g | 
| 234 |  |  * Output: t: transition matrix | 
| 235 |  |  * Return: final eta | 
| 236 |  |  * | 
| 237 |  |  * Implements the divsteps_n_matrix_var function from the explanation. | 
| 238 |  |  */ | 
| 239 | 0 | static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) { | 
| 240 |  |     /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_59. */ | 
| 241 | 0 |     uint64_t u = 1, v = 0, q = 0, r = 1; | 
| 242 | 0 |     uint64_t f = f0, g = g0, m; | 
| 243 | 0 |     uint32_t w; | 
| 244 | 0 |     int i = 62, limit, zeros; | 
| 245 |  |  | 
| 246 | 0 |     for (;;) { | 
| 247 |  |         /* Use a sentinel bit to count zeros only up to i. */ | 
| 248 | 0 |         zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i)); | 
| 249 |  |         /* Perform zeros divsteps at once; they all just divide g by two. */ | 
| 250 | 0 |         g >>= zeros; | 
| 251 | 0 |         u <<= zeros; | 
| 252 | 0 |         v <<= zeros; | 
| 253 | 0 |         eta -= zeros; | 
| 254 | 0 |         i -= zeros; | 
| 255 |  |         /* We're done once we've done 62 divsteps. */ | 
| 256 | 0 |         if (i == 0) break; | 
| 257 | 0 |         VERIFY_CHECK((f & 1) == 1); | 
| 258 | 0 |         VERIFY_CHECK((g & 1) == 1); | 
| 259 | 0 |         VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i)); | 
| 260 | 0 |         VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i)); | 
| 261 |  |         /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */ | 
| 262 | 0 |         VERIFY_CHECK(eta >= -745 && eta <= 745); | 
| 263 |  |         /* If eta is negative, negate it and replace f,g with g,-f. */ | 
| 264 | 0 |         if (eta < 0) { | 
| 265 | 0 |             uint64_t tmp; | 
| 266 | 0 |             eta = -eta; | 
| 267 | 0 |             tmp = f; f = g; g = -tmp; | 
| 268 | 0 |             tmp = u; u = q; q = -tmp; | 
| 269 | 0 |             tmp = v; v = r; r = -tmp; | 
| 270 |  |             /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled | 
| 271 |  |              * out (as we'd be done before that point), and no more than eta+1 can be done as its | 
| 272 |  |              * sign will flip again once that happens. */ | 
| 273 | 0 |             limit = ((int)eta + 1) > i ? i : ((int)eta + 1); | 
| 274 | 0 |             VERIFY_CHECK(limit > 0 && limit <= 62); | 
| 275 |  |             /* m is a mask for the bottom min(limit, 6) bits. */ | 
| 276 | 0 |             m = (UINT64_MAX >> (64 - limit)) & 63U; | 
| 277 |  |             /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6) | 
| 278 |  |              * bits. */ | 
| 279 | 0 |             w = (f * g * (f * f - 2)) & m; | 
| 280 | 0 |         } else { | 
| 281 |  |             /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as | 
| 282 |  |              * eta tends to be smaller here. */ | 
| 283 | 0 |             limit = ((int)eta + 1) > i ? i : ((int)eta + 1); | 
| 284 | 0 |             VERIFY_CHECK(limit > 0 && limit <= 62); | 
| 285 |  |             /* m is a mask for the bottom min(limit, 4) bits. */ | 
| 286 | 0 |             m = (UINT64_MAX >> (64 - limit)) & 15U; | 
| 287 |  |             /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4) | 
| 288 |  |              * bits. */ | 
| 289 | 0 |             w = f + (((f + 1) & 4) << 1); | 
| 290 | 0 |             w = (-w * g) & m; | 
| 291 | 0 |         } | 
| 292 | 0 |         g += f * w; | 
| 293 | 0 |         q += u * w; | 
| 294 | 0 |         r += v * w; | 
| 295 | 0 |         VERIFY_CHECK((g & m) == 0); | 
| 296 | 0 |     } | 
| 297 |  |     /* Return data in t and return value. */ | 
| 298 | 0 |     t->u = (int64_t)u; | 
| 299 | 0 |     t->v = (int64_t)v; | 
| 300 | 0 |     t->q = (int64_t)q; | 
| 301 | 0 |     t->r = (int64_t)r; | 
| 302 |  |  | 
| 303 |  |     /* The determinant of t must be a power of two. This guarantees that multiplication with t | 
| 304 |  |      * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which | 
| 305 |  |      * will be divided out again). As each divstep's individual matrix has determinant 2, the | 
| 306 |  |      * aggregate of 62 of them will have determinant 2^62. */ | 
| 307 | 0 |     VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0)); | 
| 308 |  |  | 
| 309 | 0 |     return eta; | 
| 310 | 0 | } | 
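The two cancellation formulas are modular-inverse identities for odd f: f + (((f+1) & 4) << 1) ≡ f⁻¹ (mod 16), and f·(f²−2) ≡ −f⁻¹ (mod 64). With those, w ≡ −g·f⁻¹ modulo 2^4 (resp. 2^6), so g + f·w clears the bottom bits. A brute-force self-check sketch:

```c
/* Sketch: verify the two inverse formulas behind the w computations above,
 * for all odd f modulo 64. */
#include <stdint.h>
#include <assert.h>

int main(void) {
    uint64_t f;
    for (f = 1; f < 64; f += 2) {
        uint64_t w4 = f + (((f + 1) & 4) << 1);
        uint64_t w6 = f * (f * f - 2);
        assert(((w4 * f) & 15) == 1);    /* w4 ==  f^-1 mod 16 */
        assert(((w6 * f) & 63) == 63);   /* w6 == -f^-1 mod 64 */
    }
    return 0;
}
```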
| 311 |  |  | 
| 312 |  | /* Compute the transition matrix and eta for 62 posdivsteps (variable time, eta=-delta), and keep track | 
| 313 |  |  * of the Jacobi symbol along the way. f0 and g0 must be f and g mod 2^64 rather than 2^62, because | 
| 314 |  |  * Jacobi tracking requires knowing (f mod 8) rather than just (f mod 2). | 
| 315 |  |  * | 
| 316 |  |  * Input:        eta: initial eta | 
| 317 |  |  *               f0:  bottom limb of initial f | 
| 318 |  |  *               g0:  bottom limb of initial g | 
| 319 |  |  * Output:       t: transition matrix | 
| 320 |  |  * Input/Output: (*jacp & 1) is bitflipped if and only if the Jacobi symbol of (f | g) changes sign | 
| 321 |  |  *               by applying the returned transformation matrix to it. The other bits of *jacp may | 
| 322 |  |  *               change, but are meaningless. | 
| 323 |  |  * Return:       final eta | 
| 324 |  |  */ | 
| 325 | 0 | static int64_t secp256k1_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t, int *jacp) { | 
| 326 |  |     /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_59. */ | 
| 327 | 0 |     uint64_t u = 1, v = 0, q = 0, r = 1; | 
| 328 | 0 |     uint64_t f = f0, g = g0, m; | 
| 329 | 0 |     uint32_t w; | 
| 330 | 0 |     int i = 62, limit, zeros; | 
| 331 | 0 |     int jac = *jacp; | 
| 332 |  |  | 
| 333 | 0 |     for (;;) { | 
| 334 |  |         /* Use a sentinel bit to count zeros only up to i. */ | 
| 335 | 0 |         zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i)); | 
| 336 |  |         /* Perform zeros divsteps at once; they all just divide g by two. */ | 
| 337 | 0 |         g >>= zeros; | 
| 338 | 0 |         u <<= zeros; | 
| 339 | 0 |         v <<= zeros; | 
| 340 | 0 |         eta -= zeros; | 
| 341 | 0 |         i -= zeros; | 
| 342 |  |         /* Update the bottom bit of jac: when dividing g by an odd power of 2, | 
| 343 |  |          * if (f mod 8) is 3 or 5, the Jacobi symbol changes sign. */ | 
| 344 | 0 |         jac ^= (zeros & ((f >> 1) ^ (f >> 2))); | 
| 345 |  |         /* We're done once we've done 62 posdivsteps. */ | 
| 346 | 0 |         if (i == 0) break; | 
| 347 | 0 |         VERIFY_CHECK((f & 1) == 1); | 
| 348 | 0 |         VERIFY_CHECK((g & 1) == 1); | 
| 349 | 0 |         VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i)); | 
| 350 | 0 |         VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i)); | 
| 351 |  |         /* If eta is negative, negate it and replace f,g with g,f. */ | 
| 352 | 0 |         if (eta < 0) { | 
| 353 | 0 |             uint64_t tmp; | 
| 354 | 0 |             eta = -eta; | 
| 355 | 0 |             tmp = f; f = g; g = tmp; | 
| 356 | 0 |             tmp = u; u = q; q = tmp; | 
| 357 | 0 |             tmp = v; v = r; r = tmp; | 
| 358 |  |             /* Update bottom bit of jac: when swapping f and g, the Jacobi symbol changes sign | 
| 359 |  |              * if both f and g are 3 mod 4. */ | 
| 360 | 0 |             jac ^= ((f & g) >> 1); | 
| 361 |  |             /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled | 
| 362 |  |              * out (as we'd be done before that point), and no more than eta+1 can be done as its | 
| 363 |  |              * sign will flip again once that happens. */ | 
| 364 | 0 |             limit = ((int)eta + 1) > i ? i : ((int)eta + 1); | 
| 365 | 0 |             VERIFY_CHECK(limit > 0 && limit <= 62); | 
| 366 |  |             /* m is a mask for the bottom min(limit, 6) bits. */ | 
| 367 | 0 |             m = (UINT64_MAX >> (64 - limit)) & 63U; | 
| 368 |  |             /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6) | 
| 369 |  |              * bits. */ | 
| 370 | 0 |             w = (f * g * (f * f - 2)) & m; | 
| 371 | 0 |         } else { | 
| 372 |  |             /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as | 
| 373 |  |              * eta tends to be smaller here. */ | 
| 374 | 0 |             limit = ((int)eta + 1) > i ? i : ((int)eta + 1); | 
| 375 | 0 |             VERIFY_CHECK(limit > 0 && limit <= 62); | 
| 376 |  |             /* m is a mask for the bottom min(limit, 4) bits. */ | 
| 377 | 0 |             m = (UINT64_MAX >> (64 - limit)) & 15U; | 
| 378 |  |             /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4) | 
| 379 |  |              * bits. */ | 
| 380 | 0 |             w = f + (((f + 1) & 4) << 1); | 
| 381 | 0 |             w = (-w * g) & m; | 
| 382 | 0 |         } | 
| 383 | 0 |         g += f * w; | 
| 384 | 0 |         q += u * w; | 
| 385 | 0 |         r += v * w; | 
| 386 | 0 |         VERIFY_CHECK((g & m) == 0); | 
| 387 | 0 |     } | 
| 388 |  |     /* Return data in t and return value. */ | 
| 389 | 0 |     t->u = (int64_t)u; | 
| 390 | 0 |     t->v = (int64_t)v; | 
| 391 | 0 |     t->q = (int64_t)q; | 
| 392 | 0 |     t->r = (int64_t)r; | 
| 393 |  |  | 
| 394 |  |     /* The determinant of t must be a power of two. This guarantees that multiplication with t | 
| 395 |  |      * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which | 
| 396 |  |      * will be divided out again). As each divstep's individual matrix has determinant 2 or -2, | 
| 397 |  |      * the aggregate of 62 of them will have determinant 2^62 or -2^62. */ | 
| 398 | 0 |     VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1)); | 
| 399 |  |  | 
| 400 | 0 |     *jacp = jac; | 
| 401 | 0 |     return eta; | 
| 402 | 0 | } | 
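The two jac updates encode standard facts about the Jacobi symbol for odd f, g: (2/f) = −1 exactly when f ≡ 3 or 5 (mod 8), which is the bit ((f >> 1) ^ (f >> 2)) & 1; and swapping f and g flips the symbol exactly when f ≡ g ≡ 3 (mod 4), which is the bit ((f & g) >> 1) & 1. A brute-force sketch of both bit tricks:

```c
/* Sketch: check the Jacobi sign-flip bits over all odd residues mod 16
 * (the tests only depend on the low bits of f and g). */
#include <stdint.h>
#include <assert.h>

int main(void) {
    uint64_t f, g;
    for (f = 1; f < 16; f += 2) {
        int flip2 = (int)(((f >> 1) ^ (f >> 2)) & 1);
        assert(flip2 == ((f & 7) == 3 || (f & 7) == 5));   /* (2/f) = -1 case */
        for (g = 1; g < 16; g += 2) {
            int flipswap = (int)(((f & g) >> 1) & 1);
            assert(flipswap == ((f & 3) == 3 && (g & 3) == 3)); /* reciprocity */
        }
    }
    return 0;
}
```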
| 403 |  |  | 
| 404 |  | /* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62. | 
| 405 |  |  * | 
| 406 |  |  * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range | 
| 407 |  |  * (-2^62,2^62). | 
| 408 |  |  * | 
| 409 |  |  * This implements the update_de function from the explanation. | 
| 410 |  |  */ | 
| 411 | 0 | static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp256k1_modinv64_signed62 *e, const secp256k1_modinv64_trans2x2 *t, const secp256k1_modinv64_modinfo* modinfo) { | 
| 412 | 0 |     const uint64_t M62 = UINT64_MAX >> 2; | 
| 413 | 0 |     const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4]; | 
| 414 | 0 |     const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4]; | 
| 415 | 0 |     const int64_t u = t->u, v = t->v, q = t->q, r = t->r; | 
| 416 | 0 |     int64_t md, me, sd, se; | 
| 417 | 0 |     secp256k1_int128 cd, ce; | 
| 418 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ | 
| 419 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */ | 
| 420 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ | 
| 421 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */ | 
| 422 | 0 |     VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */ | 
| 423 | 0 |     VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */ | 
| 424 |  |  | 
| 425 |  |     /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ | 
| 426 | 0 |     sd = d4 >> 63; | 
| 427 | 0 |     se = e4 >> 63; | 
| 428 | 0 |     md = (u & sd) + (v & se); | 
| 429 | 0 |     me = (q & sd) + (r & se); | 
| 430 |  |     /* Begin computing t*[d,e]. */ | 
| 431 | 0 |     secp256k1_i128_mul(&cd, u, d0); | 
| 432 | 0 |     secp256k1_i128_accum_mul(&cd, v, e0); | 
| 433 | 0 |     secp256k1_i128_mul(&ce, q, d0); | 
| 434 | 0 |     secp256k1_i128_accum_mul(&ce, r, e0); | 
| 435 |  |     /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */ | 
| 436 | 0 |     md -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&cd) + md) & M62; | 
| 437 | 0 |     me -= (modinfo->modulus_inv62 * secp256k1_i128_to_u64(&ce) + me) & M62; | 
| 438 |  |     /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */ | 
| 439 | 0 |     secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[0], md); | 
| 440 | 0 |     secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[0], me); | 
| 441 |  |     /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */ | 
| 442 | 0 |     VERIFY_CHECK((secp256k1_i128_to_u64(&cd) & M62) == 0); secp256k1_i128_rshift(&cd, 62); | 
| 443 | 0 |     VERIFY_CHECK((secp256k1_i128_to_u64(&ce) & M62) == 0); secp256k1_i128_rshift(&ce, 62); | 
| 444 |  |     /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */ | 
| 445 | 0 |     secp256k1_i128_accum_mul(&cd, u, d1); | 
| 446 | 0 |     secp256k1_i128_accum_mul(&cd, v, e1); | 
| 447 | 0 |     secp256k1_i128_accum_mul(&ce, q, d1); | 
| 448 | 0 |     secp256k1_i128_accum_mul(&ce, r, e1); | 
| 449 | 0 |     if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */ | 
| 450 | 0 |         secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[1], md); | 
| 451 | 0 |         secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[1], me); | 
| 452 | 0 |     } | 
| 453 | 0 |     d->v[0] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62); | 
| 454 | 0 |     e->v[0] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62); | 
| 455 |  |     /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */ | 
| 456 | 0 |     secp256k1_i128_accum_mul(&cd, u, d2); | 
| 457 | 0 |     secp256k1_i128_accum_mul(&cd, v, e2); | 
| 458 | 0 |     secp256k1_i128_accum_mul(&ce, q, d2); | 
| 459 | 0 |     secp256k1_i128_accum_mul(&ce, r, e2); | 
| 460 | 0 |     if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */ | 
| 461 | 0 |         secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[2], md); | 
| 462 | 0 |         secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[2], me); | 
| 463 | 0 |     } | 
| 464 | 0 |     d->v[1] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62); | 
| 465 | 0 |     e->v[1] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62); | 
| 466 |  |     /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */ | 
| 467 | 0 |     secp256k1_i128_accum_mul(&cd, u, d3); | 
| 468 | 0 |     secp256k1_i128_accum_mul(&cd, v, e3); | 
| 469 | 0 |     secp256k1_i128_accum_mul(&ce, q, d3); | 
| 470 | 0 |     secp256k1_i128_accum_mul(&ce, r, e3); | 
| 471 | 0 |     if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */ | 
| 472 | 0 |         secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[3], md); | 
| 473 | 0 |         secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[3], me); | 
| 474 | 0 |     } | 
| 475 | 0 |     d->v[2] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62); | 
| 476 | 0 |     e->v[2] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62); | 
| 477 |  |     /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */ | 
| 478 | 0 |     secp256k1_i128_accum_mul(&cd, u, d4); | 
| 479 | 0 |     secp256k1_i128_accum_mul(&cd, v, e4); | 
| 480 | 0 |     secp256k1_i128_accum_mul(&ce, q, d4); | 
| 481 | 0 |     secp256k1_i128_accum_mul(&ce, r, e4); | 
| 482 | 0 |     secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[4], md); | 
| 483 | 0 |     secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[4], me); | 
| 484 | 0 |     d->v[3] = secp256k1_i128_to_u64(&cd) & M62; secp256k1_i128_rshift(&cd, 62); | 
| 485 | 0 |     e->v[3] = secp256k1_i128_to_u64(&ce) & M62; secp256k1_i128_rshift(&ce, 62); | 
| 486 |  |     /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */ | 
| 487 | 0 |     d->v[4] = secp256k1_i128_to_i64(&cd); | 
| 488 | 0 |     e->v[4] = secp256k1_i128_to_i64(&ce); | 
| 489 |  |  | 
| 490 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ | 
| 491 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */ | 
| 492 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ | 
| 493 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */ | 
| 494 | 0 | } | 
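The md/me correction picks a multiple of the modulus that zeroes the accumulator's bottom 62 bits, using the precomputed modulus_inv62 = modulus⁻¹ mod 2^62. A single-limb sketch of the same idea (assuming __int128; the modulus limb here is a made-up odd value, and md starts at 0 rather than being adjusted as in the code above):

```c
#include <stdint.h>
#include <assert.h>

int main(void) {
    const uint64_t M62 = UINT64_MAX >> 2;
    uint64_t m = 0x34f20099aa774ec1u;     /* made-up odd 62-bit "modulus" limb */
    uint64_t inv = m;                     /* Newton iteration: x -> x*(2 - m*x) */
    int i;
    for (i = 0; i < 5; ++i) inv *= 2 - m * inv;
    inv &= M62;                           /* inv = m^-1 mod 2^62 */
    assert(((m * inv) & M62) == 1);
    {
        __int128 c = (__int128)0x123456789abcdefll * 97;  /* arbitrary accumulator */
        uint64_t md = (0 - inv * (uint64_t)c) & M62;      /* md = -c/m mod 2^62 */
        c += (__int128)m * md;
        assert(((uint64_t)c & M62) == 0); /* bottom 62 bits cancelled exactly */
    }
    return 0;
}
```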
| 495 |  |  | 
| 496 |  | /* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62. | 
| 497 |  |  * | 
| 498 |  |  * This implements the update_fg function from the explanation. | 
| 499 |  |  */ | 
| 500 | 0 | static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) { | 
| 501 | 0 |     const uint64_t M62 = UINT64_MAX >> 2; | 
| 502 | 0 |     const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4]; | 
| 503 | 0 |     const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4]; | 
| 504 | 0 |     const int64_t u = t->u, v = t->v, q = t->q, r = t->r; | 
| 505 | 0 |     secp256k1_int128 cf, cg; | 
| 506 |  |     /* Start computing t*[f,g]. */ | 
| 507 | 0 |     secp256k1_i128_mul(&cf, u, f0); | 
| 508 | 0 |     secp256k1_i128_accum_mul(&cf, v, g0); | 
| 509 | 0 |     secp256k1_i128_mul(&cg, q, f0); | 
| 510 | 0 |     secp256k1_i128_accum_mul(&cg, r, g0); | 
| 511 |  |     /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ | 
| 512 | 0 |     VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62); | 
| 513 | 0 |     VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62); | 
| 514 |  |     /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */ | 
| 515 | 0 |     secp256k1_i128_accum_mul(&cf, u, f1); | 
| 516 | 0 |     secp256k1_i128_accum_mul(&cf, v, g1); | 
| 517 | 0 |     secp256k1_i128_accum_mul(&cg, q, f1); | 
| 518 | 0 |     secp256k1_i128_accum_mul(&cg, r, g1); | 
| 519 | 0 |     f->v[0] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62); | 
| 520 | 0 |     g->v[0] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62); | 
| 521 |  |     /* Compute limb 2 of t*[f,g], and store it as output limb 1. */ | 
| 522 | 0 |     secp256k1_i128_accum_mul(&cf, u, f2); | 
| 523 | 0 |     secp256k1_i128_accum_mul(&cf, v, g2); | 
| 524 | 0 |     secp256k1_i128_accum_mul(&cg, q, f2); | 
| 525 | 0 |     secp256k1_i128_accum_mul(&cg, r, g2); | 
| 526 | 0 |     f->v[1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62); | 
| 527 | 0 |     g->v[1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62); | 
| 528 |  |     /* Compute limb 3 of t*[f,g], and store it as output limb 2. */ | 
| 529 | 0 |     secp256k1_i128_accum_mul(&cf, u, f3); | 
| 530 | 0 |     secp256k1_i128_accum_mul(&cf, v, g3); | 
| 531 | 0 |     secp256k1_i128_accum_mul(&cg, q, f3); | 
| 532 | 0 |     secp256k1_i128_accum_mul(&cg, r, g3); | 
| 533 | 0 |     f->v[2] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62); | 
| 534 | 0 |     g->v[2] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62); | 
| 535 |  |     /* Compute limb 4 of t*[f,g], and store it as output limb 3. */ | 
| 536 | 0 |     secp256k1_i128_accum_mul(&cf, u, f4); | 
| 537 | 0 |     secp256k1_i128_accum_mul(&cf, v, g4); | 
| 538 | 0 |     secp256k1_i128_accum_mul(&cg, q, f4); | 
| 539 | 0 |     secp256k1_i128_accum_mul(&cg, r, g4); | 
| 540 | 0 |     f->v[3] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62); | 
| 541 | 0 |     g->v[3] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62); | 
| 542 |  |     /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */ | 
| 543 | 0 |     f->v[4] = secp256k1_i128_to_i64(&cf); | 
| 544 | 0 |     g->v[4] = secp256k1_i128_to_i64(&cg); | 
| 545 | 0 | } | 
| 546 |  |  | 
| 547 |  | /* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps. | 
| 548 |  |  * | 
| 549 |  |  * Version that operates on a variable number of limbs in f and g. | 
| 550 |  |  * | 
| 551 |  |  * This implements the update_fg function from the explanation. | 
| 552 |  |  */ | 
| 553 | 0 | static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) { | 
| 554 | 0 |     const uint64_t M62 = UINT64_MAX >> 2; | 
| 555 | 0 |     const int64_t u = t->u, v = t->v, q = t->q, r = t->r; | 
| 556 | 0 |     int64_t fi, gi; | 
| 557 | 0 |     secp256k1_int128 cf, cg; | 
| 558 | 0 |     int i; | 
| 559 | 0 |     VERIFY_CHECK(len > 0); | 
| 560 |  |     /* Start computing t*[f,g]. */ | 
| 561 | 0 |     fi = f->v[0]; | 
| 562 | 0 |     gi = g->v[0]; | 
| 563 | 0 |     secp256k1_i128_mul(&cf, u, fi); | 
| 564 | 0 |     secp256k1_i128_accum_mul(&cf, v, gi); | 
| 565 | 0 |     secp256k1_i128_mul(&cg, q, fi); | 
| 566 | 0 |     secp256k1_i128_accum_mul(&cg, r, gi); | 
| 567 |  |     /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ | 
| 568 | 0 |     VERIFY_CHECK((secp256k1_i128_to_u64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62); | 
| 569 | 0 |     VERIFY_CHECK((secp256k1_i128_to_u64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62); | 
| 570 |  |     /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting | 
| 571 |  |      * down by 62 bits). */ | 
| 572 | 0 |     for (i = 1; i < len; ++i) { | 
| 573 | 0 |         fi = f->v[i]; | 
| 574 | 0 |         gi = g->v[i]; | 
| 575 | 0 |         secp256k1_i128_accum_mul(&cf, u, fi); | 
| 576 | 0 |         secp256k1_i128_accum_mul(&cf, v, gi); | 
| 577 | 0 |         secp256k1_i128_accum_mul(&cg, q, fi); | 
| 578 | 0 |         secp256k1_i128_accum_mul(&cg, r, gi); | 
| 579 | 0 |         f->v[i - 1] = secp256k1_i128_to_u64(&cf) & M62; secp256k1_i128_rshift(&cf, 62); | 
| 580 | 0 |         g->v[i - 1] = secp256k1_i128_to_u64(&cg) & M62; secp256k1_i128_rshift(&cg, 62); | 
| 581 | 0 |     } | 
| 582 |  |     /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ | 
| 583 | 0 |     f->v[len - 1] = secp256k1_i128_to_i64(&cf); | 
| 584 | 0 |     g->v[len - 1] = secp256k1_i128_to_i64(&cg); | 
| 585 | 0 | } | 
| 586 |  |  | 
| 587 |  | /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ | 
| 588 | 0 | static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) { | 
| 589 |  |     /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ | 
| 590 | 0 |     secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; | 
| 591 | 0 |     secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; | 
| 592 | 0 |     secp256k1_modinv64_signed62 f = modinfo->modulus; | 
| 593 | 0 |     secp256k1_modinv64_signed62 g = *x; | 
| 594 | 0 |     int i; | 
| 595 | 0 |     int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */ | 
| 596 |  |  | 
| 597 |  |     /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */ | 
| 598 | 0 |     for (i = 0; i < 10; ++i) { | 
| 599 |  |         /* Compute transition matrix and new zeta after 59 divsteps. */ | 
| 600 | 0 |         secp256k1_modinv64_trans2x2 t; | 
| 601 | 0 |         zeta = secp256k1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t); | 
| 602 |  |         /* Update d,e using that transition matrix. */ | 
| 603 | 0 |         secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo); | 
| 604 |  |         /* Update f,g using that transition matrix. */ | 
| 605 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ | 
| 606 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ | 
| 607 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ | 
| 608 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */ | 
| 609 |  |  | 
| 610 | 0 |         secp256k1_modinv64_update_fg_62(&f, &g, &t); | 
| 611 |  |  | 
| 612 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */ | 
| 613 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */ | 
| 614 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */ | 
| 615 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */ | 
| 616 | 0 |     } | 
| 617 |  |  | 
| 618 |  |     /* At this point sufficient iterations have been performed that g must have reached 0 | 
| 619 |  |      * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g | 
| 620 |  |      * values, i.e. +/- 1, and d now contains +/- the modular inverse. */ | 
| 621 |  |  | 
| 622 |  |     /* g == 0 */ | 
| 623 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0); | 
| 624 |  |     /* |f| == 1, or (x == 0 and d == 0 and f == modulus) */ | 
| 625 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 || | 
| 626 | 0 |                  secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 || | 
| 627 | 0 |                  (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && | 
| 628 | 0 |                   secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && | 
| 629 | 0 |                   secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0)); | 
| 630 |  |  | 
| 631 |  |     /* Optionally negate d, normalize to [0,modulus), and return it. */ | 
| 632 | 0 |     secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo); | 
| 633 | 0 |     *x = d; | 
| 634 | 0 | } | 
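For intuition, here is the whole algorithm in miniature: a full-width (non-limb) sketch of modular inversion via divsteps, following the Python reference in doc/safegcd_implementation.md. It uses delta rather than zeta and plain int64_t arithmetic, so it only handles small odd moduli; the invariants are f ≡ d·x and g ≡ e·x (mod M), so once g = 0 and f = ±1, d·f is the inverse:

```c
#include <stdint.h>
#include <assert.h>

/* x/2 mod M, for odd M: make x even by adding M if needed, then halve. */
static int64_t div2(int64_t M, int64_t x) {
    if (x & 1) x += M;
    return x / 2;
}

static int64_t toy_modinv(int64_t M, int64_t x) { /* M odd, gcd(x,M)=1 */
    int64_t delta = 1, f = M, g = x % M, d = 0, e = 1;
    while (g != 0) {
        if (delta > 0 && (g & 1)) {
            int64_t tf = f, td = d;
            delta = 1 - delta; f = g; g = (g - tf) / 2;
            d = e; e = div2(M, e - td);
        } else if (g & 1) {
            delta = 1 + delta; g = (g + f) / 2; e = div2(M, e + d);
        } else {
            delta = 1 + delta; g = g / 2; e = div2(M, e);
        }
    }
    assert(f == 1 || f == -1);          /* |f| = gcd(x, M), assumed 1 */
    return ((d * f) % M + M) % M;       /* 1/x = d/f = d*f mod M */
}

int main(void) {
    int64_t inv = toy_modinv(1000003, 12345);
    assert(inv * 12345 % 1000003 == 1);
    return 0;
}
```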
| 635 |  |  | 
| 636 |  | /* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ | 
| 637 | 0 | static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) { | 
| 638 |  |     /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ | 
| 639 | 0 |     secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}}; | 
| 640 | 0 |     secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}}; | 
| 641 | 0 |     secp256k1_modinv64_signed62 f = modinfo->modulus; | 
| 642 | 0 |     secp256k1_modinv64_signed62 g = *x; | 
| 643 |  | #ifdef VERIFY | 
| 644 |  |     int i = 0; | 
| 645 |  | #endif | 
| 646 | 0 |     int j, len = 5; | 
| 647 | 0 |     int64_t eta = -1; /* eta = -delta; delta is initially 1 */ | 
| 648 | 0 |     int64_t cond, fn, gn; | 
| 649 |  |  | 
| 650 |  |     /* Do iterations of 62 divsteps each until g=0. */ | 
| 651 | 0 |     while (1) { | 
| 652 |  |         /* Compute transition matrix and new eta after 62 divsteps. */ | 
| 653 | 0 |         secp256k1_modinv64_trans2x2 t; | 
| 654 | 0 |         eta = secp256k1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t); | 
| 655 |  |         /* Update d,e using that transition matrix. */ | 
| 656 | 0 |         secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo); | 
| 657 |  |         /* Update f,g using that transition matrix. */ | 
| 658 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ | 
| 659 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ | 
| 660 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ | 
| 661 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */ | 
| 662 |  |  | 
| 663 | 0 |         secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t); | 
| 664 |  |         /* If the bottom limb of g is zero, there is a chance that g=0. */ | 
| 665 | 0 |         if (g.v[0] == 0) { | 
| 666 | 0 |             cond = 0; | 
| 667 |  |             /* Check if the other limbs are also 0. */ | 
| 668 | 0 |             for (j = 1; j < len; ++j) { | 
| 669 | 0 |                 cond |= g.v[j]; | 
| 670 | 0 |             } | 
| 671 |  |             /* If so, we're done. */ | 
| 672 | 0 |             if (cond == 0) break; | 
| 673 | 0 |         } | 
| 674 |  |  | 
| 675 |  |         /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */ | 
| 676 | 0 |         fn = f.v[len - 1]; | 
| 677 | 0 |         gn = g.v[len - 1]; | 
| 678 | 0 |         cond = ((int64_t)len - 2) >> 63; | 
| 679 | 0 |         cond |= fn ^ (fn >> 63); | 
| 680 | 0 |         cond |= gn ^ (gn >> 63); | 
| 681 |  |         /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */ | 
| 682 | 0 |         if (cond == 0) { | 
| 683 | 0 |             f.v[len - 2] |= (uint64_t)fn << 62; | 
| 684 | 0 |             g.v[len - 2] |= (uint64_t)gn << 62; | 
| 685 | 0 |             --len; | 
| 686 | 0 |         } | 
| 687 |  |  | 
| 688 | 0 |         VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ | 
| 689 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ | 
| 690 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ | 
| 691 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ | 
| 692 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */ | 
| 693 | 0 |     } | 
| 694 |  |  | 
| 695 |  |     /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of | 
| 696 |  |      * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */ | 
| 697 |  |  | 
| 698 |  |     /* g == 0 */ | 
| 699 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0); | 
| 700 |  |     /* |f| == 1, or (x == 0 and d == 0 and f == modulus) */ | 
| 701 | 0 |     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 || | 
| 702 | 0 |                  secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 || | 
| 703 | 0 |                  (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && | 
| 704 | 0 |                   secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 && | 
| 705 | 0 |                   secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0)); | 
| 706 |  |  | 
| 707 |  |     /* Optionally negate d, normalize to [0,modulus), and return it. */ | 
| 708 | 0 |     secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo); | 
| 709 | 0 |     *x = d; | 
| 710 | 0 | } | 
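The length-reduction test above uses the fact that, for a signed limb x, x ^ (x >> 63) is zero exactly when x is 0 or -1, i.e. when the top limb is pure sign extension of the limb below and can be folded into it. A small sketch (relying, as the file does, on sign-extending right shifts):

```c
/* Sketch of the limb-count shrink test in modinv64_var. */
#include <stdint.h>
#include <assert.h>

int main(void) {
    int64_t x;
    assert((((int64_t)0)  ^ (((int64_t)0)  >> 63)) == 0);  /* 0 folds */
    assert((((int64_t)-1) ^ (((int64_t)-1) >> 63)) == 0);  /* -1 folds */
    for (x = 1; x < 1000; ++x) {
        assert((x ^ (x >> 63)) != 0);                      /* >= 1: keep limb */
        if (x > 1) assert(((-x) ^ ((-x) >> 63)) != 0);     /* <= -2: keep limb */
    }
    return 0;
}
```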
| 711 |  |  | 
| 712 |  | /* Do up to 25 iterations of 62 posdivsteps each (up to 1550 steps; more is extremely rare) until f=1. | 
| 713 |  |  * In VERIFY mode use a lower iteration count (12, i.e. 744 steps, close to the median of 756), so failure actually occurs. */ | 
| 714 |  | #ifdef VERIFY | 
| 715 |  | #define JACOBI64_ITERATIONS 12 | 
| 716 |  | #else | 
| 717 | 0 | #define JACOBI64_ITERATIONS 25 | 
| 718 |  | #endif | 
| 719 |  |  | 
| 720 |  | /* Compute the Jacobi symbol of x modulo modinfo->modulus (variable time). gcd(x,modulus) must be 1. */ | 
| 721 | 0 | static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) { | 
| 722 |  |     /* Start with f=modulus, g=x, eta=-1. */ | 
| 723 | 0 |     secp256k1_modinv64_signed62 f = modinfo->modulus; | 
| 724 | 0 |     secp256k1_modinv64_signed62 g = *x; | 
| 725 | 0 |     int j, len = 5; | 
| 726 | 0 |     int64_t eta = -1; /* eta = -delta; delta is initially 1 */ | 
| 727 | 0 |     int64_t cond, fn, gn; | 
| 728 | 0 |     int jac = 0; | 
| 729 | 0 |     int count; | 
| 730 |  |  | 
| 731 |  |     /* The input limbs must all be non-negative. */ | 
| 732 | 0 |     VERIFY_CHECK(g.v[0] >= 0 && g.v[1] >= 0 && g.v[2] >= 0 && g.v[3] >= 0 && g.v[4] >= 0); | 
| 733 |  |  | 
| 734 |  |     /* If x > 0, then if the loop below converges, it converges to f=g=gcd(x,modulus). Since we | 
| 735 |  |      * require that gcd(x,modulus)=1 and modulus>=3, x cannot be 0. Thus, we must reach f=1 (or | 
| 736 |  |      * time out). */ | 
| 737 | 0 |     VERIFY_CHECK((g.v[0] | g.v[1] | g.v[2] | g.v[3] | g.v[4]) != 0); | 
| 738 |  |  | 
| 739 | 0 |     for (count = 0; count < JACOBI64_ITERATIONS; ++count) { | 
| 740 |  |         /* Compute transition matrix and new eta after 62 posdivsteps. */ | 
| 741 | 0 |         secp256k1_modinv64_trans2x2 t; | 
| 742 | 0 |         eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac); | 
| 743 |  |         /* Update f,g using that transition matrix. */ | 
| 744 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ | 
| 745 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ | 
| 746 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ | 
| 747 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */ | 
| 748 |  |  | 
| 749 | 0 |         secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t); | 
| 750 |  |         /* If the bottom limb of f is 1, there is a chance that f=1. */ | 
| 751 | 0 |         if (f.v[0] == 1) { | 
| 752 | 0 |             cond = 0; | 
| 753 |  |             /* Check if the remaining limbs are 0, which would mean f=1. */ | 
| 754 | 0 |             for (j = 1; j < len; ++j) { | 
| 755 | 0 |                 cond |= f.v[j]; | 
| 756 | 0 |             } | 
| 757 |  |             /* If so, we're done. When f=1, the Jacobi symbol (g | f)=1. */ | 
| 758 | 0 |             if (cond == 0) return 1 - 2*(jac & 1); | 
| 759 | 0 |         } | 
| 760 |  |  | 
| 761 |  |         /* Determine if len>1 and limb (len-1) of both f and g is 0. */ | 
| 762 | 0 |         fn = f.v[len - 1]; | 
| 763 | 0 |         gn = g.v[len - 1]; | 
| 764 | 0 |         cond = ((int64_t)len - 2) >> 63; | 
| 765 | 0 |         cond |= fn; | 
| 766 | 0 |         cond |= gn; | 
| 767 |  |         /* If so, reduce length. */ | 
| 768 | 0 |         if (cond == 0) --len; | 
| 769 |  |  | 
| 770 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */ | 
| 771 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ | 
| 772 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */ | 
| 773 | 0 |         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */ | 
| 774 | 0 |     } | 
| 775 |  |  | 
| 776 |  |     /* The loop failed to converge to f=g after 25*62 = 1550 posdivsteps. Return 0, indicating unknown result. */ | 
| 777 | 0 |     return 0; | 
| 778 | 0 | } | 
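A note on the contract: since gcd(x, modulus) = 1, the true Jacobi symbol is never 0 here, so a 0 return unambiguously means "did not converge" and the caller must fall back to a slower method. A hypothetical caller pattern (fallback_is_square is a placeholder, not a real library function):

```c
/* Sketch of the "maybe" contract: +1/-1 are definitive answers; 0 means the
 * iteration budget ran out and a slow path must decide instead. */
static int is_square_mod(const secp256k1_modinv64_signed62 *x,
                         const secp256k1_modinv64_modinfo *modinfo) {
    int j = secp256k1_jacobi64_maybe_var(x, modinfo);
    if (j != 0) return j > 0;
    return fallback_is_square(x, modinfo); /* hypothetical slow path */
}
```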
| 779 |  |  | 
| 780 |  | #endif /* SECP256K1_MODINV64_IMPL_H */ |