diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h
index aaa576ada4..d0d9631824 100644
--- a/src/secp256k1/src/ecmult_const_impl.h
+++ b/src/secp256k1/src/ecmult_const_impl.h
@@ -1,261 +1,264 @@
 /**********************************************************************
  * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
  * Distributed under the MIT software license, see the accompanying   *
  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
  **********************************************************************/
 
 #ifndef SECP256K1_ECMULT_CONST_IMPL_H
 #define SECP256K1_ECMULT_CONST_IMPL_H
 
 #include "scalar.h"
 #include "group.h"
 #include "ecmult_const.h"
 #include "ecmult_impl.h"
 
 /* This is like `ECMULT_TABLE_GET_GE` but is constant time */
 #define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
     int m; \
-    int abs_n = (n) * (((n) > 0) * 2 - 1); \
-    int idx_n = abs_n / 2; \
+    /* Extract the sign bit for a constant-time absolute value. */ \
+    int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
+    int abs_n = ((n) + mask) ^ mask; \
+    int idx_n = abs_n >> 1; \
     secp256k1_fe neg_y; \
     VERIFY_CHECK(((n) & 1) == 1); \
     VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
     VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
     VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
     VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
     for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \
         /* This loop is used to avoid secret data in array indices. See
          * the comment in ecmult_gen_impl.h for rationale. */ \
         secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
         secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
     } \
     (r)->infinity = 0; \
     secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
     secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
 } while(0)
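
A note on the new branchless absolute value: for a negative two's-complement n, the arithmetic right shift makes mask all-ones (-1), so (n + mask) ^ mask == -n; for non-negative n, mask is 0 and the expression is the identity. No branch or multiplication depends on the (secret) sign of n. A minimal standalone sketch of the same trick, illustrative only and assuming an arithmetic right shift of signed values, as the macro itself does:

    #include <limits.h>
    #include <stdio.h>

    /* Branchless absolute value: mask is all-ones when n is negative,
     * zero otherwise, so (n + mask) ^ mask yields -n or n without
     * branching on the sign of n. */
    static int ct_abs(int n) {
        int mask = n >> (sizeof(n) * CHAR_BIT - 1);
        return (n + mask) ^ mask;
    }

    int main(void) {
        printf("%d %d\n", ct_abs(-7), ct_abs(7)); /* prints "7 7" */
        return 0;
    }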
 
 
 /** Convert a number to WNAF notation.
  *  The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
  *  It has the following guarantees:
  *  - each wnaf[i] is an odd integer between -(1 << w) and (1 << w)
  *  - each wnaf[i] is nonzero
  *  - the number of words set is always WNAF_SIZE(w) + 1
  *
  *  Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
  *  Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.)
  *  CT-RSA 2003, LNCS 2612, pp. 328-343, 2003. Springer-Verlag Berlin Heidelberg 2003
  *
  *  Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335
  */
 static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
     int global_sign;
     int skew = 0;
     int word = 0;
 
     /* 1 2 3 */
     int u_last;
     int u;
 
     int flip;
     int bit;
     secp256k1_scalar s;
     int not_neg_one;
 
     VERIFY_CHECK(w > 0);
     VERIFY_CHECK(size > 0);
 
     /* Note that we cannot handle even numbers by negating them to be odd, as is
      * done in other implementations, since if our scalars were specified to have
      * width < 256 for performance reasons, their negations would have width 256
      * and we'd lose any performance benefit. Instead, we use a technique from
      * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
      * or 2 (for odd) to the number we are encoding, returning a skew value indicating
      * this, and having the caller compensate after doing the multiplication.
      *
      * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
      * particular, to ensure that the outputs from the endomorphism-split fit into
      * 128 bits). If we negate, the parity of our number flips, inverting which of
      * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
      * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
      * we need to special-case it in this logic. */
     flip = secp256k1_scalar_is_high(scalar);
     /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
     bit = flip ^ !secp256k1_scalar_is_even(scalar);
     /* We check for negative one, since adding 2 to it will cause an overflow */
     secp256k1_scalar_negate(&s, scalar);
     not_neg_one = !secp256k1_scalar_is_one(&s);
     s = *scalar;
     secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
     /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
      * that we added two to it and flipped it. In fact for -1 these operations are
      * identical. We only flipped, but since skewing is required (in the sense that
      * the skew must be 1 or 2, never zero) and flipping is not, we need to change
      * our flags to claim that we only skewed. */
     global_sign = secp256k1_scalar_cond_negate(&s, flip);
     global_sign *= not_neg_one * 2 - 1;
     skew = 1 << bit;
 
     /* 4 */
     u_last = secp256k1_scalar_shr_int(&s, w);
     do {
         int sign;
         int even;
 
         /* 4.1 4.4 */
         u = secp256k1_scalar_shr_int(&s, w);
         /* 4.2 */
         even = ((u & 1) == 0);
         sign = 2 * (u_last > 0) - 1;
         u += sign * even;
         u_last -= sign * even * (1 << w);
 
         /* 4.3, adapted for global sign change */
         wnaf[word++] = u_last * global_sign;
 
         u_last = u;
     } while (word * w < size);
     wnaf[word] = u * global_sign;
 
     VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
     VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
     return skew;
 }
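
To see what the recoding above actually produces, here is a hypothetical plain-integer transcription of steps 4.1-4.4 for a small odd scalar; it deliberately omits the skew/negation handling and the constant-time scalar type, and is for exposition only:

    #include <stdio.h>

    int main(void) {
        int w = 4, size = 8;
        int val = 181;                /* odd scalar, fits in `size` bits */
        int wnaf[4], word = 0, u_last, u;

        u_last = val & ((1 << w) - 1); val >>= w;
        do {
            int even, sign;
            u = val & ((1 << w) - 1); val >>= w;
            even = ((u & 1) == 0);
            sign = 2 * (u_last > 0) - 1;
            u += sign * even;                 /* force the next digit odd */
            u_last -= sign * even * (1 << w); /* ...borrowing from this one */
            wnaf[word++] = u_last;
            u_last = u;
        } while (word * w < size);
        wnaf[word] = u;

        /* Digits are 5, -5, 1: every digit odd, 5 - 5*16 + 1*256 == 181,
         * and the digit count does not depend on the value. */
        printf("%d %d %d\n", wnaf[0], wnaf[1], wnaf[2]);
        return 0;
    }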
 
 static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) {
     secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
     secp256k1_ge tmpa;
     secp256k1_fe Z;
 
     int skew_1;
 #ifdef USE_ENDOMORPHISM
     secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
     int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
     int skew_lam;
     secp256k1_scalar q_1, q_lam;
 #endif
     int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
 
     int i;
 
     /* build wnaf representation for q. */
     int rsize = size;
 #ifdef USE_ENDOMORPHISM
     if (size > 128) {
         rsize = 128;
         /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
         secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
         skew_1   = secp256k1_wnaf_const(wnaf_1,   &q_1,   WINDOW_A - 1, 128);
         skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
     } else
 #endif
     {
         skew_1   = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
 #ifdef USE_ENDOMORPHISM
         skew_lam = 0;
 #endif
     }
 
     /* Calculate odd multiples of a.
      * All multiples are brought to the same Z 'denominator', which is stored
      * in Z. Due to secp256k1's isomorphism we can do all operations pretending
      * that the Z coordinate was 1, use affine addition formulae, and correct
      * the Z coordinate of the result once at the end.
      */
     secp256k1_gej_set_ge(r, a);
     secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
     for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
         secp256k1_fe_normalize_weak(&pre_a[i].y);
     }
 #ifdef USE_ENDOMORPHISM
     if (size > 128) {
         for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
             secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
         }
+
     }
 #endif
 
     /* first loop iteration (separated out so we can directly set r, rather
      * than having it start at infinity, get doubled several times, then have
      * its new value added to it) */
     i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
     VERIFY_CHECK(i != 0);
     ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
     secp256k1_gej_set_ge(r, &tmpa);
 #ifdef USE_ENDOMORPHISM
     if (size > 128) {
         i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
         VERIFY_CHECK(i != 0);
         ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
         secp256k1_gej_add_ge(r, r, &tmpa);
     }
 #endif
     /* remaining loop iterations */
     for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
         int n;
         int j;
         for (j = 0; j < WINDOW_A - 1; ++j) {
-            secp256k1_gej_double_nonzero(r, r, NULL);
+            secp256k1_gej_double_nonzero(r, r);
         }
 
         n = wnaf_1[i];
         ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
         VERIFY_CHECK(n != 0);
         secp256k1_gej_add_ge(r, r, &tmpa);
 #ifdef USE_ENDOMORPHISM
         if (size > 128) {
             n = wnaf_lam[i];
             ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
             VERIFY_CHECK(n != 0);
             secp256k1_gej_add_ge(r, r, &tmpa);
         }
 #endif
     }
 
     secp256k1_fe_mul(&r->z, &r->z, &Z);
 
     {
         /* Correct for wNAF skew */
         secp256k1_ge correction = *a;
         secp256k1_ge_storage correction_1_stor;
 #ifdef USE_ENDOMORPHISM
         secp256k1_ge_storage correction_lam_stor;
 #endif
         secp256k1_ge_storage a2_stor;
         secp256k1_gej tmpj;
         secp256k1_gej_set_ge(&tmpj, &correction);
         secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
         secp256k1_ge_set_gej(&correction, &tmpj);
         secp256k1_ge_to_storage(&correction_1_stor, a);
 #ifdef USE_ENDOMORPHISM
         if (size > 128) {
             secp256k1_ge_to_storage(&correction_lam_stor, a);
         }
 #endif
         secp256k1_ge_to_storage(&a2_stor, &correction);
 
         /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
         secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
 #ifdef USE_ENDOMORPHISM
         if (size > 128) {
             secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
         }
 #endif
 
         /* Apply the correction */
         secp256k1_ge_from_storage(&correction, &correction_1_stor);
         secp256k1_ge_neg(&correction, &correction);
         secp256k1_gej_add_ge(r, r, &correction);
 
 #ifdef USE_ENDOMORPHISM
         if (size > 128) {
             secp256k1_ge_from_storage(&correction, &correction_lam_stor);
             secp256k1_ge_neg(&correction, &correction);
             secp256k1_ge_mul_lambda(&correction, &correction);
             secp256k1_gej_add_ge(r, r, &correction);
         }
 #endif
     }
 }
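
The endomorphism path above relies on writing q as q_1 + q_lam*lambda with ~128-bit halves. A toy sanity check of that decomposition, using the order-13 exhaustive-test group and its cube root of unity lambda = 9 from tests_exhaustive.c (the small values are hand-picked for illustration):

    #include <stdio.h>

    int main(void) {
        const int n = 13, lam = 9;  /* toy group order, cube root of 1 mod 13 */
        int q = 11, q2 = 1;         /* hand-picked small "halves" */
        int q1 = ((q - lam * q2) % n + n) % n;  /* q1 = 2 */
        /* q*P can then be computed as q1*P + q2*lambda(P), where
         * lambda(P) is the cheap endomorphism (x, y) -> (beta*x, y). */
        printf("q1=%d, (q1 + lam*q2) mod n == q: %d\n",
               q1, (q1 + lam * q2) % n == q);
        return 0;
    }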
 
 #endif /* SECP256K1_ECMULT_CONST_IMPL_H */
diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h
index 8e122ab429..ded4e1dabd 100644
--- a/src/secp256k1/src/group.h
+++ b/src/secp256k1/src/group.h
@@ -1,142 +1,141 @@
 /**********************************************************************
  * Copyright (c) 2013, 2014 Pieter Wuille                             *
  * Distributed under the MIT software license, see the accompanying   *
  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
  **********************************************************************/
 
 #ifndef SECP256K1_GROUP_H
 #define SECP256K1_GROUP_H
 
 #include "num.h"
 #include "field.h"
 
 /** A group element of the secp256k1 curve, in affine coordinates. */
 typedef struct {
     secp256k1_fe x;
     secp256k1_fe y;
     int infinity; /* whether this represents the point at infinity */
 } secp256k1_ge;
 
 #define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0}
 #define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
 
 /** A group element of the secp256k1 curve, in jacobian coordinates. */
 typedef struct {
     secp256k1_fe x; /* actual X: x/z^2 */
     secp256k1_fe y; /* actual Y: y/z^3 */
     secp256k1_fe z;
     int infinity; /* whether this represents the point at infinity */
 } secp256k1_gej;
 
 #define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0}
 #define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1}
 
 typedef struct {
     secp256k1_fe_storage x;
     secp256k1_fe_storage y;
 } secp256k1_ge_storage;
 
 #define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))}
 
 #define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y)
 
 /** Set a group element equal to the point with given X and Y coordinates */
 static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y);
 
 /** Set a group element (affine) equal to the point with the given X coordinate
  *  and a Y coordinate that is a quadratic residue modulo p. The return value
  *  is true iff a point with the given X coordinate exists on the curve.
  */
 static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x);
 
 /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness
  *  for Y. Return value indicates whether the result is valid. */
 static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd);
 
 /** Check whether a group element is the point at infinity. */
 static int secp256k1_ge_is_infinity(const secp256k1_ge *a);
 
 /** Check whether a group element is valid (i.e., on the curve). */
 static int secp256k1_ge_is_valid_var(const secp256k1_ge *a);
 
 static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a);
 
 /** Set a group element equal to another which is given in jacobian coordinates */
 static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a);
 
 /** Set a batch of group elements equal to the inputs given in jacobian coordinates */
 static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len);
 
 /** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to
  *  the same global z "denominator". zr must contain the known z-ratios such
  *  that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y
  *  coordinates of the result are stored in r, the common z coordinate is
  *  stored in globalz. */
 static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr);
 
 /** Set a group element (affine) equal to the point at infinity. */
 static void secp256k1_ge_set_infinity(secp256k1_ge *r);
 
 /** Set a group element (jacobian) equal to the point at infinity. */
 static void secp256k1_gej_set_infinity(secp256k1_gej *r);
 
 /** Set a group element (jacobian) equal to another which is given in affine coordinates. */
 static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a);
 
 /** Compare the X coordinate of a group element (jacobian). */
 static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a);
 
 /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */
 static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a);
 
 /** Check whether a group element is the point at infinity. */
 static int secp256k1_gej_is_infinity(const secp256k1_gej *a);
 
 /** Check whether a group element's y coordinate is a quadratic residue. */
 static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a);
 
-/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0).
- * a may not be zero. Constant time. */
-static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
+/** Set r equal to the double of a; a may not be infinity. Constant time. */
+static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a);
 
-/** Set r equal to the double of a. If rzr is not-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */
+/** Set r equal to the double of a. If rzr is not-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */
 static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr);
 
-/** Set r equal to the sum of a and b. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */
+/** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */
 static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr);
 
 /** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */
 static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b);
 
 /** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient
     than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time
-    guarantee, and b is allowed to be infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */
+    guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr);
 
 /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
 
 #ifdef USE_ENDOMORPHISM
 /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
 static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
 #endif
 
 /** Clear a secp256k1_gej to prevent leaking sensitive information. */
 static void secp256k1_gej_clear(secp256k1_gej *r);
 
 /** Clear a secp256k1_ge to prevent leaking sensitive information. */
 static void secp256k1_ge_clear(secp256k1_ge *r);
 
 /** Convert a group element to the storage type. */
 static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a);
 
 /** Convert a group element back from the storage type. */
 static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a);
 
 /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
 static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag);
 
 /** Rescale a jacobian point by b which must be non-zero. Constant-time. */
 static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b);
 
 #endif /* SECP256K1_GROUP_H */
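
The reworded comments pin down the rzr contract: after a call with non-NULL rzr, r->z must equal a->z * *rzr. A hypothetical test-style helper making that concrete for secp256k1_gej_double_var (the helper name is made up; it assumes a non-infinity input):

    static void check_double_rzr_contract(const secp256k1_gej *a) {
        secp256k1_gej r;
        secp256k1_fe rzr, zprod;
        VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
        secp256k1_gej_double_var(&r, a, &rzr);
        /* The documented contract: r.z == a->z * rzr. */
        secp256k1_fe_mul(&zprod, &a->z, &rzr);
        VERIFY_CHECK(secp256k1_fe_equal_var(&zprod, &r.z));
    }
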
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
index 9b93c39e92..43b039becf 100644
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -1,705 +1,708 @@
 /**********************************************************************
  * Copyright (c) 2013, 2014 Pieter Wuille                             *
  * Distributed under the MIT software license, see the accompanying   *
  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
  **********************************************************************/
 
 #ifndef SECP256K1_GROUP_IMPL_H
 #define SECP256K1_GROUP_IMPL_H
 
 #include "num.h"
 #include "field.h"
 #include "group.h"
 
 /* These points can be generated in sage as follows:
  *
  * 0. Setup a worksheet with the following parameters.
  *   b = 4  # whatever CURVE_B will be set to
  *   F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
  *   C = EllipticCurve ([F (0), F (b)])
  *
  * 1. Determine all the small orders available to you. (If there are
  *    no satisfactory ones, go back and change b.)
  *   print C.order().factor(limit=1000)
  *
  * 2. Choose an order as one of the prime factors listed in the above step.
  *    (You can also multiply some to get a composite order, though the
  *    tests will crash trying to invert scalars during signing.) We take a
  *    random point and scale it to drop its order to the desired value.
  *    There is some probability this won't work; just try again.
  *   order = 199
  *   P = C.random_point()
  *   P = (int(P.order()) / int(order)) * P
  *   assert(P.order() == order)
  *
  * 3. Print the values. You'll need to use a vim macro or something to
  *    split the hex output into 4-byte chunks.
  *   print "%x %x" % P.xy()
  */
 #if defined(EXHAUSTIVE_TEST_ORDER)
 #  if EXHAUSTIVE_TEST_ORDER == 199
 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
     0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069,
     0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18,
     0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868,
     0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED
 );
 
 static const int CURVE_B = 4;
 #  elif EXHAUSTIVE_TEST_ORDER == 13
 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
     0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0,
     0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15,
     0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e,
     0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac
 );
 static const int CURVE_B = 2;
 #  else
 #    error No known generator for the specified exhaustive test group order.
 #  endif
 #else
 /** Generator for secp256k1, value 'g' defined in
  *  "Standards for Efficient Cryptography" (SEC2) 2.7.1.
  */
 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST(
     0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
     0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
     0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
     0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
 );
 
 static const int CURVE_B = 7;
 #endif
 
 static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
     secp256k1_fe zi2;
     secp256k1_fe zi3;
     secp256k1_fe_sqr(&zi2, zi);
     secp256k1_fe_mul(&zi3, &zi2, zi);
     secp256k1_fe_mul(&r->x, &a->x, &zi2);
     secp256k1_fe_mul(&r->y, &a->y, &zi3);
     r->infinity = a->infinity;
 }
 
 static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
     r->infinity = 0;
     r->x = *x;
     r->y = *y;
 }
 
 static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
     return a->infinity;
 }
 
 static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
     *r = *a;
     secp256k1_fe_normalize_weak(&r->y);
     secp256k1_fe_negate(&r->y, &r->y, 1);
 }
 
 static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
     secp256k1_fe z2, z3;
     r->infinity = a->infinity;
     secp256k1_fe_inv(&a->z, &a->z);
     secp256k1_fe_sqr(&z2, &a->z);
     secp256k1_fe_mul(&z3, &a->z, &z2);
     secp256k1_fe_mul(&a->x, &a->x, &z2);
     secp256k1_fe_mul(&a->y, &a->y, &z3);
     secp256k1_fe_set_int(&a->z, 1);
     r->x = a->x;
     r->y = a->y;
 }
 
 static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
     secp256k1_fe z2, z3;
     r->infinity = a->infinity;
     if (a->infinity) {
         return;
     }
     secp256k1_fe_inv_var(&a->z, &a->z);
     secp256k1_fe_sqr(&z2, &a->z);
     secp256k1_fe_mul(&z3, &a->z, &z2);
     secp256k1_fe_mul(&a->x, &a->x, &z2);
     secp256k1_fe_mul(&a->y, &a->y, &z3);
     secp256k1_fe_set_int(&a->z, 1);
     r->x = a->x;
     r->y = a->y;
 }
 
 static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
     secp256k1_fe u;
     size_t i;
     size_t last_i = SIZE_MAX;
 
     for (i = 0; i < len; i++) {
         if (!a[i].infinity) {
             /* Use destination's x coordinates as scratch space */
             if (last_i == SIZE_MAX) {
                 r[i].x = a[i].z;
             } else {
                 secp256k1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z);
             }
             last_i = i;
         }
     }
     if (last_i == SIZE_MAX) {
         return;
     }
     secp256k1_fe_inv_var(&u, &r[last_i].x);
 
     i = last_i;
     while (i > 0) {
         i--;
         if (!a[i].infinity) {
             secp256k1_fe_mul(&r[last_i].x, &r[i].x, &u);
             secp256k1_fe_mul(&u, &u, &a[last_i].z);
             last_i = i;
         }
     }
     VERIFY_CHECK(!a[last_i].infinity);
     r[last_i].x = u;
 
     for (i = 0; i < len; i++) {
         r[i].infinity = a[i].infinity;
         if (!a[i].infinity) {
             secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x);
         }
     }
 }
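
secp256k1_ge_set_all_gej_var above is Montgomery's batch-inversion trick: one field inversion plus a few multiplications per element, rather than one inversion each (inversions are far more expensive than multiplications). A self-contained sketch of the idea over a toy prime field, plain ints for exposition only:

    #include <stdio.h>

    static int modpow(int b, int e, int p) {
        int r = 1;
        while (e) { if (e & 1) r = r * b % p; b = b * b % p; e >>= 1; }
        return r;
    }

    int main(void) {
        const int p = 101;
        int a[3] = {7, 23, 45}, prefix[3], inv[3], u, i;

        /* Forward pass: prefix[i] = a[0]*...*a[i] mod p. */
        prefix[0] = a[0];
        for (i = 1; i < 3; i++) prefix[i] = prefix[i - 1] * a[i] % p;
        /* One inversion of the full product (Fermat's little theorem). */
        u = modpow(prefix[2], p - 2, p);
        /* Backward pass: peel the inverse back through the prefixes. */
        for (i = 2; i > 0; i--) {
            inv[i] = u * prefix[i - 1] % p;
            u = u * a[i] % p;
        }
        inv[0] = u;
        for (i = 0; i < 3; i++)   /* each product prints as 1 */
            printf("%d * %d mod %d = %d\n", a[i], inv[i], p, a[i] * inv[i] % p);
        return 0;
    }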
 
 static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) {
     size_t i = len - 1;
     secp256k1_fe zs;
 
     if (len > 0) {
         /* The z of the final point gives us the "global Z" for the table. */
         r[i].x = a[i].x;
         r[i].y = a[i].y;
         /* Ensure all y values are in weak normal form for fast negation of points */
         secp256k1_fe_normalize_weak(&r[i].y);
         *globalz = a[i].z;
         r[i].infinity = 0;
         zs = zr[i];
 
         /* Work our way backwards, using the z-ratios to scale the x/y values. */
         while (i > 0) {
             if (i != len - 1) {
                 secp256k1_fe_mul(&zs, &zs, &zr[i]);
             }
             i--;
             secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs);
         }
     }
 }
 
 static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
     r->infinity = 1;
     secp256k1_fe_clear(&r->x);
     secp256k1_fe_clear(&r->y);
     secp256k1_fe_clear(&r->z);
 }
 
 static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
     r->infinity = 1;
     secp256k1_fe_clear(&r->x);
     secp256k1_fe_clear(&r->y);
 }
 
 static void secp256k1_gej_clear(secp256k1_gej *r) {
     r->infinity = 0;
     secp256k1_fe_clear(&r->x);
     secp256k1_fe_clear(&r->y);
     secp256k1_fe_clear(&r->z);
 }
 
 static void secp256k1_ge_clear(secp256k1_ge *r) {
     r->infinity = 0;
     secp256k1_fe_clear(&r->x);
     secp256k1_fe_clear(&r->y);
 }
 
 static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) {
     secp256k1_fe x2, x3, c;
     r->x = *x;
     secp256k1_fe_sqr(&x2, x);
     secp256k1_fe_mul(&x3, x, &x2);
     r->infinity = 0;
     secp256k1_fe_set_int(&c, CURVE_B);
     secp256k1_fe_add(&c, &x3);
     return secp256k1_fe_sqrt(&r->y, &c);
 }
 
 static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
     if (!secp256k1_ge_set_xquad(r, x)) {
         return 0;
     }
     secp256k1_fe_normalize_var(&r->y);
     if (secp256k1_fe_is_odd(&r->y) != odd) {
         secp256k1_fe_negate(&r->y, &r->y, 1);
     }
     return 1;
 
 }
 
 static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    secp256k1_fe_set_int(&r->z, 1);
 }
 
 static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) {
     secp256k1_fe r, r2;
     VERIFY_CHECK(!a->infinity);
     secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
     r2 = a->x; secp256k1_fe_normalize_weak(&r2);
     return secp256k1_fe_equal_var(&r, &r2);
 }
 
 static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
     r->infinity = a->infinity;
     r->x = a->x;
     r->y = a->y;
     r->z = a->z;
     secp256k1_fe_normalize_weak(&r->y);
     secp256k1_fe_negate(&r->y, &r->y, 1);
 }
 
 static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
     return a->infinity;
 }
 
 static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) {
     secp256k1_fe y2, x3, z2, z6;
     if (a->infinity) {
         return 0;
     }
     /** y^2 = x^3 + 7
      *  (Y/Z^3)^2 = (X/Z^2)^3 + 7
      *  Y^2 / Z^6 = X^3 / Z^6 + 7
      *  Y^2 = X^3 + 7*Z^6
      */
     secp256k1_fe_sqr(&y2, &a->y);
     secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
     secp256k1_fe_sqr(&z2, &a->z);
     secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
     secp256k1_fe_mul_int(&z6, CURVE_B);
     secp256k1_fe_add(&x3, &z6);
     secp256k1_fe_normalize_weak(&x3);
     return secp256k1_fe_equal_var(&y2, &x3);
 }
 
 static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
     secp256k1_fe y2, x3, c;
     if (a->infinity) {
         return 0;
     }
     /* y^2 = x^3 + 7 */
     secp256k1_fe_sqr(&y2, &a->y);
     secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
     secp256k1_fe_set_int(&c, CURVE_B);
     secp256k1_fe_add(&x3, &c);
     secp256k1_fe_normalize_weak(&x3);
     return secp256k1_fe_equal_var(&y2, &x3);
 }
 
-static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
+static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a) {
     /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate.
      *
      * Note that there is an implementation described at
      *     https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
      * which trades a multiply for a square, but in practice this is actually slower,
      * mainly because it requires more normalizations.
      */
     secp256k1_fe t1,t2,t3,t4;
-    /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
-     *  Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
-     *  y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
-     *
-     *  Having said this, if this function receives a point on a sextic twist, e.g. by
-     *  a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
-     *  since -6 does have a cube root mod p. For this point, this function will not set
-     *  the infinity flag even though the point doubles to infinity, and the result
-     *  point will be gibberish (z = 0 but infinity = 0).
-     */
-    r->infinity = a->infinity;
-    if (r->infinity) {
-        if (rzr != NULL) {
-            secp256k1_fe_set_int(rzr, 1);
-        }
-        return;
-    }
 
-    if (rzr != NULL) {
-        *rzr = a->y;
-        secp256k1_fe_normalize_weak(rzr);
-        secp256k1_fe_mul_int(rzr, 2);
-    }
+    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
+    r->infinity = 0;
 
     secp256k1_fe_mul(&r->z, &a->z, &a->y);
     secp256k1_fe_mul_int(&r->z, 2);       /* Z' = 2*Y*Z (2) */
     secp256k1_fe_sqr(&t1, &a->x);
     secp256k1_fe_mul_int(&t1, 3);         /* T1 = 3*X^2 (3) */
     secp256k1_fe_sqr(&t2, &t1);           /* T2 = 9*X^4 (1) */
     secp256k1_fe_sqr(&t3, &a->y);
     secp256k1_fe_mul_int(&t3, 2);         /* T3 = 2*Y^2 (2) */
     secp256k1_fe_sqr(&t4, &t3);
     secp256k1_fe_mul_int(&t4, 2);         /* T4 = 8*Y^4 (2) */
     secp256k1_fe_mul(&t3, &t3, &a->x);    /* T3 = 2*X*Y^2 (1) */
     r->x = t3;
     secp256k1_fe_mul_int(&r->x, 4);       /* X' = 8*X*Y^2 (4) */
     secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
     secp256k1_fe_add(&r->x, &t2);         /* X' = 9*X^4 - 8*X*Y^2 (6) */
     secp256k1_fe_negate(&t2, &t2, 1);     /* T2 = -9*X^4 (2) */
     secp256k1_fe_mul_int(&t3, 6);         /* T3 = 12*X*Y^2 (6) */
     secp256k1_fe_add(&t3, &t2);           /* T3 = 12*X*Y^2 - 9*X^4 (8) */
     secp256k1_fe_mul(&r->y, &t1, &t3);    /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
     secp256k1_fe_negate(&t2, &t4, 2);     /* T2 = -8*Y^4 (3) */
     secp256k1_fe_add(&r->y, &t2);         /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
 }
 
-static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
-    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
-    secp256k1_gej_double_var(r, a, rzr);
+static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
+    /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
+     *  Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
+     *  y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
+     *
+     *  Having said this, if this function receives a point on a sextic twist, e.g. by
+     *  a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6,
+     *  since -6 does have a cube root mod p. For this point, this function will not set
+     *  the infinity flag even though the point doubles to infinity, and the result
+     *  point will be gibberish (z = 0 but infinity = 0).
+     */
+    if (a->infinity) {
+        r->infinity = 1;
+        if (rzr != NULL) {
+            secp256k1_fe_set_int(rzr, 1);
+        }
+        return;
+    }
+
+    if (rzr != NULL) {
+        *rzr = a->y;
+        secp256k1_fe_normalize_weak(rzr);
+        secp256k1_fe_mul_int(rzr, 2);
+    }
+
+    secp256k1_gej_double_nonzero(r, a);
 }
 
 static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
     /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
     secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
 
     if (a->infinity) {
         VERIFY_CHECK(rzr == NULL);
         *r = *b;
         return;
     }
 
     if (b->infinity) {
         if (rzr != NULL) {
             secp256k1_fe_set_int(rzr, 1);
         }
         *r = *a;
         return;
     }
 
     r->infinity = 0;
     secp256k1_fe_sqr(&z22, &b->z);
     secp256k1_fe_sqr(&z12, &a->z);
     secp256k1_fe_mul(&u1, &a->x, &z22);
     secp256k1_fe_mul(&u2, &b->x, &z12);
     secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z);
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
     secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
             secp256k1_gej_double_var(r, a, rzr);
         } else {
             if (rzr != NULL) {
                 secp256k1_fe_set_int(rzr, 0);
             }
             r->infinity = 1;
         }
         return;
     }
     secp256k1_fe_sqr(&i2, &i);
     secp256k1_fe_sqr(&h2, &h);
     secp256k1_fe_mul(&h3, &h, &h2);
     secp256k1_fe_mul(&h, &h, &b->z);
     if (rzr != NULL) {
         *rzr = h;
     }
     secp256k1_fe_mul(&r->z, &a->z, &h);
     secp256k1_fe_mul(&t, &u1, &h2);
     r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
     secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
     secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
     secp256k1_fe_add(&r->y, &h3);
 }
 
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
     /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
     secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
     if (a->infinity) {
         VERIFY_CHECK(rzr == NULL);
         secp256k1_gej_set_ge(r, b);
         return;
     }
     if (b->infinity) {
         if (rzr != NULL) {
             secp256k1_fe_set_int(rzr, 1);
         }
         *r = *a;
         return;
     }
     r->infinity = 0;
 
     secp256k1_fe_sqr(&z12, &a->z);
     u1 = a->x; secp256k1_fe_normalize_weak(&u1);
     secp256k1_fe_mul(&u2, &b->x, &z12);
     s1 = a->y; secp256k1_fe_normalize_weak(&s1);
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
     secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
             secp256k1_gej_double_var(r, a, rzr);
         } else {
             if (rzr != NULL) {
                 secp256k1_fe_set_int(rzr, 0);
             }
             r->infinity = 1;
         }
         return;
     }
     secp256k1_fe_sqr(&i2, &i);
     secp256k1_fe_sqr(&h2, &h);
     secp256k1_fe_mul(&h3, &h, &h2);
     if (rzr != NULL) {
         *rzr = h;
     }
     secp256k1_fe_mul(&r->z, &a->z, &h);
     secp256k1_fe_mul(&t, &u1, &h2);
     r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
     secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
     secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
     secp256k1_fe_add(&r->y, &h3);
 }
 
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
     /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
     secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
 
     if (b->infinity) {
         *r = *a;
         return;
     }
     if (a->infinity) {
         secp256k1_fe bzinv2, bzinv3;
         r->infinity = b->infinity;
         secp256k1_fe_sqr(&bzinv2, bzinv);
         secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv);
         secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
         secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
         secp256k1_fe_set_int(&r->z, 1);
         return;
     }
     r->infinity = 0;
 
     /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
      *  secp256k1's isomorphism we can multiply the Z coordinates on both sides
      *  by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
      *  This means that (rx,ry,rz) can be calculated as
      *  (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
      *  The variable az below holds the modified Z coordinate for a, which is used
      *  for the computation of rx and ry, but not for rz.
      */
     secp256k1_fe_mul(&az, &a->z, bzinv);
 
     secp256k1_fe_sqr(&z12, &az);
     u1 = a->x; secp256k1_fe_normalize_weak(&u1);
     secp256k1_fe_mul(&u2, &b->x, &z12);
     s1 = a->y; secp256k1_fe_normalize_weak(&s1);
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
     secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
             secp256k1_gej_double_var(r, a, NULL);
         } else {
             r->infinity = 1;
         }
         return;
     }
     secp256k1_fe_sqr(&i2, &i);
     secp256k1_fe_sqr(&h2, &h);
     secp256k1_fe_mul(&h3, &h, &h2);
     r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h);
     secp256k1_fe_mul(&t, &u1, &h2);
     r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
     secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
     secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
     secp256k1_fe_add(&r->y, &h3);
 }
 
 
 static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
     /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
     static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
     secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
     secp256k1_fe m_alt, rr_alt;
     int infinity, degenerate;
     VERIFY_CHECK(!b->infinity);
     VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
 
     /** In:
      *    Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
      *    In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
      *  we find as solution for a unified addition/doubling formula:
      *    lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
      *    x3 = lambda^2 - (x1 + x2)
      *    2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
      *
      *  Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
      *    U1 = X1*Z2^2, U2 = X2*Z1^2
      *    S1 = Y1*Z2^3, S2 = Y2*Z1^3
      *    Z = Z1*Z2
      *    T = U1+U2
      *    M = S1+S2
      *    Q = T*M^2
      *    R = T^2-U1*U2
      *    X3 = 4*(R^2-Q)
      *    Y3 = 4*(R*(3*Q-2*R^2)-M^4)
      *    Z3 = 2*M*Z
      *  (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
      *
      *  This formula has the benefit of being the same for both addition
      *  of distinct points and doubling. However, it breaks down in the
      *  case that either point is infinity, or that y1 = -y2. We handle
      *  these cases in the following ways:
      *
      *    - If b is infinity we simply bail by means of a VERIFY_CHECK.
      *
      *    - If a is infinity, we detect this, and at the end of the
      *      computation replace the result (which will be meaningless,
      *      but we compute to be constant-time) with b.x : b.y : 1.
      *
      *    - If a = -b, we have y1 = -y2, which is a degenerate case.
      *      But here the answer is infinity, so we simply set the
      *      infinity flag of the result, overriding the computed values
      *      without even needing to cmov.
      *
      *    - If y1 = -y2 but x1 != x2, which does occur thanks to certain
      *      properties of our curve (specifically, 1 has nontrivial cube
      *      roots in our field, and the curve equation has no x coefficient)
      *      then the answer is not infinity but also not given by the above
      *      equation. In this case, we cmov in place an alternate expression
      *      for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these
      *      expressions for lambda are defined, they are equal, and can be
      *      obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
      *      then substitution of x^3 + 7 for y^2 (using the curve equation).
      *      For all pairs of nonzero points (a, b) at least one is defined,
      *      so this covers everything.
      */
 
     secp256k1_fe_sqr(&zz, &a->z);                       /* z = Z1^2 */
     u1 = a->x; secp256k1_fe_normalize_weak(&u1);        /* u1 = U1 = X1*Z2^2 (1) */
     secp256k1_fe_mul(&u2, &b->x, &zz);                  /* u2 = U2 = X2*Z1^2 (1) */
     s1 = a->y; secp256k1_fe_normalize_weak(&s1);        /* s1 = S1 = Y1*Z2^3 (1) */
     secp256k1_fe_mul(&s2, &b->y, &zz);                  /* s2 = Y2*Z1^2 (1) */
     secp256k1_fe_mul(&s2, &s2, &a->z);                  /* s2 = S2 = Y2*Z1^3 (1) */
     t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (2) */
     m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (2) */
     secp256k1_fe_sqr(&rr, &t);                          /* rr = T^2 (1) */
     secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 */
     secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (2) */
     secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (3) */
     /** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
      *  case that Z = z1z2 = 0, and this is special-cased later on). */
     degenerate = secp256k1_fe_normalizes_to_zero(&m) &
                  secp256k1_fe_normalizes_to_zero(&rr);
     /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
      * This means either x1 == beta*x2 or beta*x1 == x2, where beta is
      * a nontrivial cube root of one. In either case, an alternate
      * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
      * so we set R/M equal to this. */
     rr_alt = s1;
     secp256k1_fe_mul_int(&rr_alt, 2);       /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
     secp256k1_fe_add(&m_alt, &u1);          /* Malt = X1*Z2^2 - X2*Z1^2 */
 
     secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
     secp256k1_fe_cmov(&m_alt, &m, !degenerate);
     /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
      * From here on out Ralt and Malt represent the numerator
      * and denominator of lambda; R and M represent the explicit
      * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
     secp256k1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
     secp256k1_fe_mul(&q, &n, &t);                       /* q = Q = T*Malt^2 (1) */
     /* These two lines use the observation that either M == Malt or M == 0,
      * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
      * zero (which is "computed" by cmov). So the cost is one squaring
      * versus two multiplications. */
     secp256k1_fe_sqr(&n, &n);
     secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
     secp256k1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
     secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Malt*Z (1) */
     infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
     secp256k1_fe_mul_int(&r->z, 2);                     /* r->z = Z3 = 2*Malt*Z (2) */
     secp256k1_fe_negate(&q, &q, 1);                     /* q = -Q (2) */
     secp256k1_fe_add(&t, &q);                           /* t = Ralt^2-Q (3) */
     secp256k1_fe_normalize_weak(&t);
     r->x = t;                                           /* r->x = Ralt^2-Q (1) */
     secp256k1_fe_mul_int(&t, 2);                        /* t = 2*x3 (2) */
     secp256k1_fe_add(&t, &q);                           /* t = 2*x3 - Q: (4) */
     secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*x3 - Q) (1) */
     secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
     secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
     secp256k1_fe_normalize_weak(&r->y);
     secp256k1_fe_mul_int(&r->x, 4);                     /* r->x = X3 = 4*(Ralt^2-Q) */
     secp256k1_fe_mul_int(&r->y, 4);                     /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */
 
     /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
     secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
     secp256k1_fe_cmov(&r->y, &b->y, a->infinity);
     secp256k1_fe_cmov(&r->z, &fe_1, a->infinity);
     r->infinity = infinity;
 }
 
 static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
     /* Operations: 4 mul, 1 sqr */
     secp256k1_fe zz;
     VERIFY_CHECK(!secp256k1_fe_is_zero(s));
     secp256k1_fe_sqr(&zz, s);
     secp256k1_fe_mul(&r->x, &r->x, &zz);                /* r->x *= s^2 */
     secp256k1_fe_mul(&r->y, &r->y, &zz);
     secp256k1_fe_mul(&r->y, &r->y, s);                  /* r->y *= s^3 */
     secp256k1_fe_mul(&r->z, &r->z, s);                  /* r->z *= s   */
 }
 
 static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
     secp256k1_fe x, y;
     VERIFY_CHECK(!a->infinity);
     x = a->x;
     secp256k1_fe_normalize(&x);
     y = a->y;
     secp256k1_fe_normalize(&y);
     secp256k1_fe_to_storage(&r->x, &x);
     secp256k1_fe_to_storage(&r->y, &y);
 }
 
 static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) {
     secp256k1_fe_from_storage(&r->x, &a->x);
     secp256k1_fe_from_storage(&r->y, &a->y);
     r->infinity = 0;
 }
 
 static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
     secp256k1_fe_storage_cmov(&r->x, &a->x, flag);
     secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
 }
 
 #ifdef USE_ENDOMORPHISM
 static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
     static const secp256k1_fe beta = SECP256K1_FE_CONST(
         0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
         0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
     );
     *r = *a;
     secp256k1_fe_mul(&r->x, &r->x, &beta);
 }
 #endif
 
 static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
     secp256k1_fe yz;
 
     if (a->infinity) {
         return 0;
     }
 
     /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as
      * that of a->z (since 1 / a->z^3 == a->z / a->z^4, and z^4 is a square).
      * Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z is. */
     secp256k1_fe_mul(&yz, &a->y, &a->z);
     return secp256k1_fe_is_quad_var(&yz);
 }
 
 #endif /* SECP256K1_GROUP_IMPL_H */
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
index b44e357cb6..8cca1cef21 100644
--- a/src/secp256k1/src/tests_exhaustive.c
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -1,511 +1,511 @@
 /***********************************************************************
  * Copyright (c) 2016 Andrew Poelstra                                 *
  * Distributed under the MIT software license, see the accompanying   *
  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
  **********************************************************************/
 
 #if defined HAVE_CONFIG_H
 #include "libsecp256k1-config.h"
 #endif
 
 #include <stdio.h>
 #include <stdlib.h>
 
 #include <time.h>
 
 #undef USE_ECMULT_STATIC_PRECOMPUTATION
 
 #ifndef EXHAUSTIVE_TEST_ORDER
 /* see group_impl.h for allowable values */
 #define EXHAUSTIVE_TEST_ORDER 13
 #define EXHAUSTIVE_TEST_LAMBDA 9   /* cube root of 1 mod 13 */
 #endif
 
 #include "include/secp256k1.h"
 #include "group.h"
 #include "secp256k1.c"
 #include "testrand_impl.h"
 
 #ifdef ENABLE_MODULE_RECOVERY
 #include "src/modules/recovery/main_impl.h"
 #include "include/secp256k1_recovery.h"
 #endif
 
 /** stolen from tests.c */
 void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
     CHECK(a->infinity == b->infinity);
     if (a->infinity) {
         return;
     }
     CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
     CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
 }
 
 void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
     secp256k1_fe z2s;
     secp256k1_fe u1, u2, s1, s2;
     CHECK(a->infinity == b->infinity);
     if (a->infinity) {
         return;
     }
     /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
     secp256k1_fe_sqr(&z2s, &b->z);
     secp256k1_fe_mul(&u1, &a->x, &z2s);
     u2 = b->x; secp256k1_fe_normalize_weak(&u2);
     secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
     s2 = b->y; secp256k1_fe_normalize_weak(&s2);
     CHECK(secp256k1_fe_equal_var(&u1, &u2));
     CHECK(secp256k1_fe_equal_var(&s1, &s2));
 }
 
 void random_fe(secp256k1_fe *x) {
     unsigned char bin[32];
     do {
         secp256k1_rand256(bin);
         if (secp256k1_fe_set_b32(x, bin)) {
             return;
         }
     } while(1);
 }
 /** END stolen from tests.c */
 
 int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
                                       const unsigned char *key32, const unsigned char *algo16,
                                       void *data, unsigned int attempt) {
     secp256k1_scalar s;
     int *idata = data;
     (void)msg32;
     (void)key32;
     (void)algo16;
     /* Some nonces cannot be used because they'd cause s and/or r to be zero.
      * The signing function has retry logic here that just re-calls the nonce
      * function with an increased `attempt`. So if attempt > 0 this means we
      * need to change the nonce to avoid an infinite loop. */
     if (attempt > 0) {
         *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER;
     }
     secp256k1_scalar_set_int(&s, *idata);
     secp256k1_scalar_get_b32(nonce32, &s);
     return 1;
 }
 
 #ifdef USE_ENDOMORPHISM
 void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
     int i;
     for (i = 0; i < order; i++) {
         secp256k1_ge res;
         secp256k1_ge_mul_lambda(&res, &group[i]);
         ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
     }
 }
 #endif
 
 void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
     int i, j;
 
     /* Sanity-check (and check infinity functions) */
     CHECK(secp256k1_ge_is_infinity(&group[0]));
     CHECK(secp256k1_gej_is_infinity(&groupj[0]));
     for (i = 1; i < order; i++) {
         CHECK(!secp256k1_ge_is_infinity(&group[i]));
         CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
     }
 
     /* Check all addition formulae */
     for (j = 0; j < order; j++) {
         secp256k1_fe fe_inv;
         secp256k1_fe_inv(&fe_inv, &groupj[j].z);
         for (i = 0; i < order; i++) {
             secp256k1_ge zless_gej;
             secp256k1_gej tmp;
             /* add_var */
             secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
             ge_equals_gej(&group[(i + j) % order], &tmp);
             /* add_ge */
             if (j > 0) {
                 secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
                 ge_equals_gej(&group[(i + j) % order], &tmp);
             }
             /* add_ge_var */
             secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
             ge_equals_gej(&group[(i + j) % order], &tmp);
             /* add_zinv_var */
             zless_gej.infinity = groupj[j].infinity;
             zless_gej.x = groupj[j].x;
             zless_gej.y = groupj[j].y;
             secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
             ge_equals_gej(&group[(i + j) % order], &tmp);
         }
     }
 
     /* Check doubling */
     for (i = 0; i < order; i++) {
         secp256k1_gej tmp;
         if (i > 0) {
-            secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL);
+            secp256k1_gej_double_nonzero(&tmp, &groupj[i]);
             ge_equals_gej(&group[(2 * i) % order], &tmp);
         }
         secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
         ge_equals_gej(&group[(2 * i) % order], &tmp);
     }
 
     /* Check negation */
     for (i = 1; i < order; i++) {
         secp256k1_ge tmp;
         secp256k1_gej tmpj;
         secp256k1_ge_neg(&tmp, &group[i]);
         ge_equals_ge(&group[order - i], &tmp);
         secp256k1_gej_neg(&tmpj, &groupj[i]);
         ge_equals_gej(&group[order - i], &tmpj);
     }
 }
 
 void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
     int i, j, r_log;
     for (r_log = 1; r_log < order; r_log++) {
         for (j = 0; j < order; j++) {
             for (i = 0; i < order; i++) {
                 secp256k1_gej tmp;
                 secp256k1_scalar na, ng;
                 secp256k1_scalar_set_int(&na, i);
                 secp256k1_scalar_set_int(&ng, j);
 
                 secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
                 ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
 
                 if (i > 0) {
                     secp256k1_ecmult_const(&tmp, &group[i], &ng, 256);
                     ge_equals_gej(&group[(i * j) % order], &tmp);
                 }
             }
         }
     }
 }
 
 typedef struct {
     secp256k1_scalar sc[2];
     secp256k1_ge pt[2];
 } ecmult_multi_data;
 
 static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
     ecmult_multi_data *data = (ecmult_multi_data*) cbdata;
     *sc = data->sc[idx];
     *pt = data->pt[idx];
     return 1;
 }
 
 void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
     int i, j, k, x, y;
     secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
     for (i = 0; i < order; i++) {
         for (j = 0; j < order; j++) {
             for (k = 0; k < order; k++) {
                 for (x = 0; x < order; x++) {
                     for (y = 0; y < order; y++) {
                         secp256k1_gej tmp;
                         secp256k1_scalar g_sc;
                         ecmult_multi_data data;
 
                         secp256k1_scalar_set_int(&data.sc[0], i);
                         secp256k1_scalar_set_int(&data.sc[1], j);
                         secp256k1_scalar_set_int(&g_sc, k);
                         data.pt[0] = group[x];
                         data.pt[1] = group[y];
 
                         secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
                         ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
                     }
                 }
             }
         }
     }
     secp256k1_scratch_destroy(&ctx->error_callback, scratch);
 }
 
 void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
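     /* r is the x-coordinate of k*G, normalized, serialized, and reduced into a
      * scalar, exactly as ECDSA derives its r value. */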
     secp256k1_fe x;
     unsigned char x_bin[32];
     k %= EXHAUSTIVE_TEST_ORDER;
     x = group[k].x;
     secp256k1_fe_normalize(&x);
     secp256k1_fe_get_b32(x_bin, &x);
     secp256k1_scalar_set_b32(r, x_bin, NULL);
 }
 
 void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
     int s, r, msg, key;
     for (s = 1; s < order; s++) {
         for (r = 1; r < order; r++) {
             for (msg = 1; msg < order; msg++) {
                 for (key = 1; key < order; key++) {
                     secp256k1_ge nonconst_ge;
                     secp256k1_ecdsa_signature sig;
                     secp256k1_pubkey pk;
                     secp256k1_scalar sk_s, msg_s, r_s, s_s;
                     secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                     int k, should_verify;
                     unsigned char msg32[32];
 
                     secp256k1_scalar_set_int(&s_s, s);
                     secp256k1_scalar_set_int(&r_s, r);
                     secp256k1_scalar_set_int(&msg_s, msg);
                     secp256k1_scalar_set_int(&sk_s, key);
 
                     /* Verify by hand */
                     /* Run through every k value that gives us this r and check that *one* works.
                      * Note there could be none, there could be multiple, ECDSA is weird. */
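                      /* Concretely: a candidate nonce k explains (r, s) iff
                       * x(k*G) == r (as scalars) and s*k == msg + r*sk (mod order). */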
                     should_verify = 0;
                     for (k = 0; k < order; k++) {
                         secp256k1_scalar check_x_s;
                         r_from_k(&check_x_s, group, k);
                         if (r_s == check_x_s) {
                             secp256k1_scalar_set_int(&s_times_k_s, k);
                             secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                             secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                             secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                             should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                         }
                     }
                      /* NB: verification enforces the low-S rule, so a high s must not
                       * verify even when the equation above holds */
                      should_verify &= !secp256k1_scalar_is_high(&s_s);
 
                     /* Verify by calling verify */
                     secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
                      /* In exhaustive-test builds the scalar type is a plain small
                       * integer, so sk_s can be used directly as an array index. */
                      memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                     secp256k1_pubkey_save(&pk, &nonconst_ge);
                     secp256k1_scalar_get_b32(msg32, &msg_s);
                     CHECK(should_verify ==
                           secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                 }
             }
         }
     }
 }
 
 void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
     int i, j, k;
 
     /* Loop */
     for (i = 1; i < order; i++) {  /* message */
         for (j = 1; j < order; j++) {  /* key */
             for (k = 1; k < order; k++) {  /* nonce */
                 const int starting_k = k;
                 secp256k1_ecdsa_signature sig;
                 secp256k1_scalar sk, msg, r, s, expected_r;
                 unsigned char sk32[32], msg32[32];
                 secp256k1_scalar_set_int(&msg, i);
                 secp256k1_scalar_set_int(&sk, j);
                 secp256k1_scalar_get_b32(sk32, &sk);
                 secp256k1_scalar_get_b32(msg32, &msg);
 
                 secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
 
                 secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
                 /* Note that we compute expected_r *after* signing -- this is important
                  * because our nonce-computing function might change k during
                  * signing. */
                 r_from_k(&expected_r, group, k);
                 CHECK(r == expected_r);
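                 /* The signing equation k*s == msg + r*sk (mod order) must hold, up to
                  * the low-S rule, which may have replaced s with order - s. */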
                 CHECK((k * s) % order == (i + r * j) % order ||
                       (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
 
                 /* Overflow means we've tried every possible nonce */
                 if (k < starting_k) {
                     break;
                 }
             }
         }
     }
 
     /* We would like to verify zero-knowledge here by counting how often every
      * possible (s, r) tuple appears, but because the group order is larger
      * than the field order, when coercing the x-values to scalar values, some
      * appear more often than others, so we are actually not zero-knowledge.
      * (This effect also appears in the real code, but the difference is on the
      * order of 1/2^128th the field order, so the deviation is not useful to a
      * computationally bounded attacker.)
      */
 }
 
 #ifdef ENABLE_MODULE_RECOVERY
 void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
     int i, j, k;
 
     /* Loop */
     for (i = 1; i < order; i++) {  /* message */
         for (j = 1; j < order; j++) {  /* key */
             for (k = 1; k < order; k++) {  /* nonce */
                 const int starting_k = k;
                 secp256k1_fe r_dot_y_normalized;
                 secp256k1_ecdsa_recoverable_signature rsig;
                 secp256k1_ecdsa_signature sig;
                 secp256k1_scalar sk, msg, r, s, expected_r;
                 unsigned char sk32[32], msg32[32];
                 int expected_recid;
                 int recid;
                 secp256k1_scalar_set_int(&msg, i);
                 secp256k1_scalar_set_int(&sk, j);
                 secp256k1_scalar_get_b32(sk32, &sk);
                 secp256k1_scalar_get_b32(msg32, &msg);
 
                 secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
 
                 /* Check directly */
                 secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
                 r_from_k(&expected_r, group, k);
                 CHECK(r == expected_r);
                 CHECK((k * s) % order == (i + r * j) % order ||
                       (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
                 /* In computing the recid, there is an overflow condition that is disabled in
                  * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.x value
                  * will exceed the group order, and our signing code always holds out for r
                  * values that don't overflow, so with a proper overflow check the tests would
                  * loop indefinitely. */
                 r_dot_y_normalized = group[k].y;
                 secp256k1_fe_normalize(&r_dot_y_normalized);
                 /* The recovery id is also flipped when we hit the low-S branch */
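                 /* recid bit 0 encodes the parity of R.y; negating s (low-S rule)
                  * corresponds to negating R, which flips that parity. */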
                 if ((k * s) % order == (i + r * j) % order) {
                     expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
                 } else {
                     expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
                 }
                 CHECK(recid == expected_recid);
 
                 /* Convert to a standard sig then check */
                 secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                 secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
                 /* Note that we compute expected_r *after* signing -- this is important
                  * because our nonce-computing function might change k during
                  * signing. */
                 r_from_k(&expected_r, group, k);
                 CHECK(r == expected_r);
                 CHECK((k * s) % order == (i + r * j) % order ||
                       (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
 
                 /* Overflow means we've tried every possible nonce */
                 if (k < starting_k) {
                     break;
                 }
             }
         }
     }
 }
 
 void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
     /* This is essentially a copy of test_exhaustive_verify, with recovery added */
     int s, r, msg, key;
     for (s = 1; s < order; s++) {
         for (r = 1; r < order; r++) {
             for (msg = 1; msg < order; msg++) {
                 for (key = 1; key < order; key++) {
                     secp256k1_ge nonconst_ge;
                     secp256k1_ecdsa_recoverable_signature rsig;
                     secp256k1_ecdsa_signature sig;
                     secp256k1_pubkey pk;
                     secp256k1_scalar sk_s, msg_s, r_s, s_s;
                     secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                     int recid = 0;
                     int k, should_verify;
                     unsigned char msg32[32];
 
                     secp256k1_scalar_set_int(&s_s, s);
                     secp256k1_scalar_set_int(&r_s, r);
                     secp256k1_scalar_set_int(&msg_s, msg);
                     secp256k1_scalar_set_int(&sk_s, key);
                     secp256k1_scalar_get_b32(msg32, &msg_s);
 
                     /* Verify by hand */
                     /* Run through every k value that gives us this r and check that *one* works.
                      * Note there could be none, there could be multiple, ECDSA is weird. */
                     should_verify = 0;
                     for (k = 0; k < order; k++) {
                         secp256k1_scalar check_x_s;
                         r_from_k(&check_x_s, group, k);
                         if (r_s == check_x_s) {
                             secp256k1_scalar_set_int(&s_times_k_s, k);
                             secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                             secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                             secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                             should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                         }
                     }
                      /* NB: verification enforces the low-S rule, so a high s must not
                       * verify even when the equation above holds */
                      should_verify &= !secp256k1_scalar_is_high(&s_s);
 
                     /* We would like to try recovering the pubkey and checking that it matches,
                      * but pubkey recovery is impossible in the exhaustive tests (the reason
                      * being that there are 12 nonzero r values, 12 nonzero points, and no
                      * overlap between the sets, so there are no valid signatures). */
 
                     /* Verify by converting to a standard signature and calling verify */
                     secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
                     secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                     memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                     secp256k1_pubkey_save(&pk, &nonconst_ge);
                     CHECK(should_verify ==
                           secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                 }
             }
         }
     }
 }
 #endif
 
 int main(void) {
     int i;
     secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
     secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
 
     /* Build context */
     secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
 
     /* TODO set z = 1, then do num_tests runs with random z values */
 
     /* Generate the entire group */
     secp256k1_gej_set_infinity(&groupj[0]);
     secp256k1_ge_set_gej(&group[0], &groupj[0]);
     for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
         /* Set a different random z-value for each Jacobian point */
         secp256k1_fe z;
         random_fe(&z);
 
         secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
         secp256k1_ge_set_gej(&group[i], &groupj[i]);
         secp256k1_gej_rescale(&groupj[i], &z);
 
         /* Verify against ecmult_gen */
         {
             secp256k1_scalar scalar_i;
             secp256k1_gej generatedj;
             secp256k1_ge generated;
 
             secp256k1_scalar_set_int(&scalar_i, i);
             secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
             secp256k1_ge_set_gej(&generated, &generatedj);
 
             CHECK(group[i].infinity == 0);
             CHECK(generated.infinity == 0);
             CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
             CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
         }
     }
 
     /* Run the tests */
 #ifdef USE_ENDOMORPHISM
     test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
 #endif
     test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
     test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
     test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER);
     test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
     test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
 
 #ifdef ENABLE_MODULE_RECOVERY
     test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
     test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
 #endif
 
     secp256k1_context_destroy(ctx);
     return 0;
 }
 
diff --git a/src/secp256k1/src/util.h b/src/secp256k1/src/util.h
index 9deb61bc59..d5fa39c02c 100644
--- a/src/secp256k1/src/util.h
+++ b/src/secp256k1/src/util.h
@@ -1,162 +1,163 @@
 /**********************************************************************
  * Copyright (c) 2013, 2014 Pieter Wuille                             *
  * Distributed under the MIT software license, see the accompanying   *
  * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
  **********************************************************************/
 
 #ifndef SECP256K1_UTIL_H
 #define SECP256K1_UTIL_H
 
 #if defined HAVE_CONFIG_H
 #include "libsecp256k1-config.h"
 #endif
 
 #include <stdlib.h>
 #include <stdint.h>
 #include <stdio.h>
+#include <limits.h> /* for CHAR_BIT */
 
 typedef struct {
     void (*fn)(const char *text, void* data);
     const void* data;
 } secp256k1_callback;
 
 static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * const cb, const char * const text) {
     cb->fn(text, (void*)cb->data);
 }
 
 /* In DETERMINISTIC builds, omit file/line details so that failure output does
  * not depend on the source layout. */
 #ifdef DETERMINISTIC
 #define TEST_FAILURE(msg) do { \
     fprintf(stderr, "%s\n", msg); \
     abort(); \
 } while(0)
 #else
 #define TEST_FAILURE(msg) do { \
     fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \
     abort(); \
 } while(0)
 #endif
 
 #if SECP256K1_GNUC_PREREQ(3, 0)
 #define EXPECT(x,c) __builtin_expect((x),(c))
 #else
 #define EXPECT(x,c) (x)
 #endif
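 /* EXPECT(x,c) hints that x is expected to equal c; CHECK uses EXPECT(!(cond), 0)
  * so that the failure branch is treated as the unlikely path. */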
 
 #ifdef DETERMINISTIC
 #define CHECK(cond) do { \
     if (EXPECT(!(cond), 0)) { \
         TEST_FAILURE("test condition failed"); \
     } \
 } while(0)
 #else
 #define CHECK(cond) do { \
     if (EXPECT(!(cond), 0)) { \
         TEST_FAILURE("test condition failed: " #cond); \
     } \
 } while(0)
 #endif
 
 /* Like assert(), but only enabled when VERIFY is defined. Conditions must be
  * side-effect free: COVERAGE builds discard them entirely, and other non-VERIFY
  * builds merely evaluate them and cast the result to void. */
 #if defined(COVERAGE)
 #define VERIFY_CHECK(check)
 #define VERIFY_SETUP(stmt)
 #elif defined(VERIFY)
 #define VERIFY_CHECK CHECK
 #define VERIFY_SETUP(stmt) do { stmt; } while(0)
 #else
 #define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
 #define VERIFY_SETUP(stmt)
 #endif
 
 /* malloc wrapper that reports allocation failure through the error callback. */
 static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
     void *ret = malloc(size);
     if (ret == NULL) {
         secp256k1_callback_call(cb, "Out of memory");
     }
     return ret;
 }
 
 /* realloc wrapper that reports allocation failure through the error callback. */
 static SECP256K1_INLINE void *checked_realloc(const secp256k1_callback* cb, void *ptr, size_t size) {
     void *ret = realloc(ptr, size);
     if (ret == NULL) {
         secp256k1_callback_call(cb, "Out of memory");
     }
     return ret;
 }
 
 #if defined(__BIGGEST_ALIGNMENT__)
 #define ALIGNMENT __BIGGEST_ALIGNMENT__
 #else
 /* Use 16-byte alignment because common architectures never have alignment
  * requirements above 8 for any of the types we care about. The extra headroom
  * is acceptable since we currently do not mind wasting a few bytes. */
 #define ALIGNMENT 16
 #endif
 
 #define ROUND_TO_ALIGN(size) ((((size) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT)
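 /* For example, with ALIGNMENT 16: ROUND_TO_ALIGN(1) == 16,
  * ROUND_TO_ALIGN(16) == 16, and ROUND_TO_ALIGN(17) == 32. */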
 
 /* Assume there is a contiguous memory object with bounds [base, base + max_size)
  * of which the memory range [base, *prealloc_ptr) is already allocated for usage,
  * where *prealloc_ptr is an aligned pointer. In that setting, this function
  * reserves the subobject [*prealloc_ptr, *prealloc_ptr + alloc_size) of
  * alloc_size bytes by increasing *prealloc_ptr accordingly, taking into account
  * alignment requirements.
  *
  * The function returns an aligned pointer to the newly allocated subobject.
  *
  * This is useful for manual memory management: if we're simply given a block
  * [base, base + max_size), the caller can use this function to allocate memory
  * in this block and keep track of the current allocation state with *prealloc_ptr.
  *
  * It is VERIFY_CHECKed that there is enough space left in the memory object and
  * *prealloc_ptr is aligned relative to base.
  */
 static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_size, void* base, size_t max_size) {
     size_t aligned_alloc_size = ROUND_TO_ALIGN(alloc_size);
     void* ret;
     VERIFY_CHECK(prealloc_ptr != NULL);
     VERIFY_CHECK(*prealloc_ptr != NULL);
     VERIFY_CHECK(base != NULL);
     VERIFY_CHECK((unsigned char*)*prealloc_ptr >= (unsigned char*)base);
     VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0);
     VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size);
     ret = *prealloc_ptr;
     *((unsigned char**)prealloc_ptr) += aligned_alloc_size;
     return ret;
 }
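 
 /* A minimal usage sketch of manual_alloc (illustrative only: the helper name,
  * block size, and element types below are assumptions, not library API). It
  * carves two sub-allocations out of one caller-provided block. */
 static SECP256K1_INLINE void manual_alloc_example(void) {
     /* The union forces an alignment suitable for the uint32_t stored below. */
     union { unsigned char bytes[8 * ALIGNMENT]; uint64_t align_me; } block;
     void *prealloc = block.bytes; /* allocation cursor; starts at base */
     uint32_t *a;
     unsigned char *buf;
 
     /* Each call reserves a chunk rounded up to ALIGNMENT and advances the cursor. */
     a = (uint32_t *)manual_alloc(&prealloc, sizeof(*a), block.bytes, sizeof(block.bytes));
     buf = (unsigned char *)manual_alloc(&prealloc, 17, block.bytes, sizeof(block.bytes));
 
     *a = 42;
     buf[0] = 0;
 }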
 
 /* Macro for restrict, when available and not in a VERIFY build. */
 #if defined(SECP256K1_BUILD) && defined(VERIFY)
 # define SECP256K1_RESTRICT
 #else
 # if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
 #  if SECP256K1_GNUC_PREREQ(3,0)
 #   define SECP256K1_RESTRICT __restrict__
 #  elif (defined(_MSC_VER) && _MSC_VER >= 1400)
 #   define SECP256K1_RESTRICT __restrict
 #  else
 #   define SECP256K1_RESTRICT
 #  endif
 # else
 #  define SECP256K1_RESTRICT restrict
 # endif
 #endif
 
 #if defined(_WIN32)
 # define I64FORMAT "I64d"
 # define I64uFORMAT "I64u"
 #else
 # define I64FORMAT "lld"
 # define I64uFORMAT "llu"
 #endif
 
 #if defined(HAVE___INT128)
 # if defined(__GNUC__)
 #  define SECP256K1_GNUC_EXT __extension__
 # else
 #  define SECP256K1_GNUC_EXT
 # endif
 SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; /* used by the 64-bit field and scalar code */
 #endif
 
 #endif /* SECP256K1_UTIL_H */