D8044.diff
D8044: [SECP256K1] Rip out non-endomorphism code
diff --git a/contrib/teamcity/build-configurations.yml b/contrib/teamcity/build-configurations.yml
--- a/contrib/teamcity/build-configurations.yml
+++ b/contrib/teamcity/build-configurations.yml
@@ -271,13 +271,6 @@
- secp256k1
timeout: 600
- build-secp256k1-without-endomorphism:
- cmake_flags:
- - "-DSECP256K1_ENABLE_ENDOMORPHISM=OFF"
- templates:
- - secp256k1
- timeout: 600
-
build-secp256k1-java:
cmake_flags:
- '-DSECP256K1_ENABLE_MODULE_ECDH=ON'
diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml
--- a/src/secp256k1/.travis.yml
+++ b/src/secp256k1/.travis.yml
@@ -29,7 +29,6 @@
global:
- WIDEMUL=auto
- BIGNUM=gmp
- - ENDOMORPHISM=no
- STATICPRECOMPUTATION=yes
- ECMULTGENPRECISION=auto
- ASM=no
@@ -54,15 +53,12 @@
jobs:
- WIDEMUL=int64 RECOVERY=yes
- WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes
- - WIDEMUL=int64 ENDOMORPHISM=yes
- WIDEMUL=int128
- WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes
- - WIDEMUL=int128 ENDOMORPHISM=yes
- - WIDEMUL=int128 ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes
+ - WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes
- WIDEMUL=int128 ASM=x86_64
- - WIDEMUL=int128 ENDOMORPHISM=yes ASM=x86_64
- BIGNUM=no
- - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes
+ - BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes
- BIGNUM=no STATICPRECOMPUTATION=no
- AUTOTOOLS_TARGET=distcheck CMAKE_TARGET=install WITH_VALGRIND=no CTIMETEST=no BENCH=no
- AUTOTOOLS_EXTRA_FLAGS=CPPFLAGS=-DDETERMINISTIC CMAKE_EXTRA_FLAGS=-DCMAKE_C_FLAGS=-DDETERMINISTIC
@@ -71,12 +67,7 @@
- ECMULTGENPRECISION=2
- ECMULTGENPRECISION=8
- RUN_VALGRIND=yes
- BIGNUM=no ENDOMORPHISM=yes ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes OPENSSL_TESTS=no MULTISET=yes
- AUTOTOOLS_TARGET=
- CMAKE_EXTRA_FLAGS=-DCMAKE_C_FLAGS=-DVALGRIND CMAKE_TARGET="secp256k1-tests secp256k1-exhaustive_tests"
- # The same as above but without endomorphism.
- - RUN_VALGRIND=yes
- BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes OPENSSL_TESTS=no MULTISET=yes
+ BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes OPENSSL_TESTS=no MULTISET=yes
AUTOTOOLS_TARGET=
CMAKE_EXTRA_FLAGS=-DCMAKE_C_FLAGS=-DVALGRIND CMAKE_TARGET="secp256k1-tests secp256k1-exhaustive_tests"
- SCHNORR=no
@@ -85,13 +76,13 @@
include:
- compiler: clang
os: linux
- env: HOST=i686-linux-gnu ENDOMORPHISM=yes OPENSSL_TESTS=no
+ env: HOST=i686-linux-gnu OPENSSL_TESTS=no
- compiler: clang
os: linux
env: HOST=i686-linux-gnu BIGNUM=no OPENSSL_TESTS=no
- compiler: gcc
os: linux
- env: HOST=i686-linux-gnu ENDOMORPHISM=yes BIGNUM=no OPENSSL_TESTS=no
+ env: HOST=i686-linux-gnu BIGNUM=no OPENSSL_TESTS=no
- compiler: gcc
os: linux
env: HOST=i686-linux-gnu OPENSSL_TESTS=no
diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt
--- a/src/secp256k1/CMakeLists.txt
+++ b/src/secp256k1/CMakeLists.txt
@@ -251,12 +251,6 @@
set(USE_EXTERNAL_DEFAULT_CALLBACKS 1)
endif()
-# Endomorphism
-option(SECP256K1_ENABLE_ENDOMORPHISM "Enable endomorphism" ON)
-if(SECP256K1_ENABLE_ENDOMORPHISM)
- set(USE_ENDOMORPHISM 1)
-endif()
-
# Make the emult window size customizable.
set(SECP256K1_ECMULT_WINDOW_SIZE 15 CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24].")
if(${SECP256K1_ECMULT_WINDOW_SIZE} LESS 2 OR ${SECP256K1_ECMULT_WINDOW_SIZE} GREATER 24)
diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md
--- a/src/secp256k1/README.md
+++ b/src/secp256k1/README.md
@@ -52,7 +52,7 @@
* Use wNAF notation for point multiplicands.
* Use a much larger window for multiples of G, using precomputed multiples.
* Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
- * Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
+ * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
* Point multiplication for signing
* Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
* Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains)
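A note on the README bullet that is now unconditional: the optimization works because secp256k1 has an efficiently computable endomorphism. With lambda a non-trivial cube root of unity modulo the group order n, the verification multiplicand is split (secp256k1_scalar_split_lambda) as k = k_1 + k_2*lambda (mod n), where k_1 and k_2 are roughly 128 bits each, so that k*P = k_1*P + k_2*(lambda*P). Computing lambda*P is almost free because lambda*(x, y) = (beta*x, y) for a fixed field constant beta (see secp256k1_ge_mul_lambda later in this diff), and the two half-width multiplicands are folded into the same Shamir's-trick loop, roughly halving the number of point doublings.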
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -116,11 +116,6 @@
[use_exhaustive_tests=$enableval],
[use_exhaustive_tests=yes])
-AC_ARG_ENABLE(endomorphism,
- AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
- [use_endomorphism=$enableval],
- [use_endomorphism=no])
-
AC_ARG_ENABLE(ecmult_static_precomputation,
AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
[use_ecmult_static_precomputation=$enableval],
@@ -179,8 +174,7 @@
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
-[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
-[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
+[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
)],
[req_ecmult_window=$withval], [req_ecmult_window=auto])
@@ -467,10 +461,6 @@
SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
fi
-if test x"$use_endomorphism" = x"yes"; then
- AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
-fi
-
if test x"$set_precomp" = x"yes"; then
AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
fi
@@ -567,7 +557,6 @@
echo
echo "Build Options:"
-echo " with endomorphism = $use_endomorphism"
echo " with ecmult precomp = $set_precomp"
echo " with external callbacks = $use_external_default_callbacks"
echo " with jni = $use_jni"
diff --git a/src/secp256k1/src/basic-config.h b/src/secp256k1/src/basic-config.h
--- a/src/secp256k1/src/basic-config.h
+++ b/src/secp256k1/src/basic-config.h
@@ -11,7 +11,6 @@
#undef USE_ASM_X86_64
#undef USE_ECMULT_STATIC_PRECOMPUTATION
-#undef USE_ENDOMORPHISM
#undef USE_EXTERNAL_ASM
#undef USE_EXTERNAL_DEFAULT_CALLBACKS
#undef USE_FIELD_INV_BUILTIN
diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c
--- a/src/secp256k1/src/bench_internal.c
+++ b/src/secp256k1/src/bench_internal.c
@@ -117,7 +117,6 @@
}
}
-#ifdef USE_ENDOMORPHISM
void bench_scalar_split(void* arg, int iters) {
int i, j = 0;
bench_inv *data = (bench_inv*)arg;
@@ -128,7 +127,6 @@
}
CHECK(j <= iters);
}
-#endif
void bench_scalar_inverse(void* arg, int iters) {
int i, j = 0;
@@ -397,9 +395,7 @@
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10);
-#ifdef USE_ENDOMORPHISM
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters);
-#endif
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000);
if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000);
diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h
--- a/src/secp256k1/src/ecmult.h
+++ b/src/secp256k1/src/ecmult.h
@@ -15,9 +15,7 @@
typedef struct {
/* For accelerating the computation of a*P + b*G: */
secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */
-#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */
-#endif
} secp256k1_ecmult_context;
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
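Keeping pre_g_128 unconditionally mirrors how the generator term is handled in secp256k1_ecmult_strauss_wnaf further down: the G scalar ng is split with secp256k1_scalar_split_128 as ng = ng_1 + ng_128*2^128, so ng*G = ng_1*G + ng_128*(2^128*G), and the second term is served by this table of odd multiples of 2^128*G.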
diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h
--- a/src/secp256k1/src/ecmult_const_impl.h
+++ b/src/secp256k1/src/ecmult_const_impl.h
@@ -140,19 +140,16 @@
secp256k1_fe Z;
int skew_1;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
secp256k1_scalar q_1, q_lam;
-#endif
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
/* build wnaf representation for q. */
int rsize = size;
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
@@ -160,12 +157,9 @@
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
-#endif
{
skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
-#ifdef USE_ENDOMORPHISM
skew_lam = 0;
-#endif
}
/* Calculate odd multiples of a.
@@ -179,14 +173,12 @@
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_fe_normalize_weak(&pre_a[i].y);
}
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
}
-#endif
/* first loop iteration (separated out so we can directly set r, rather
* than having it start at infinity, get doubled several times, then have
@@ -195,14 +187,12 @@
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
secp256k1_gej_set_ge(r, &tmpa);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
secp256k1_gej_add_ge(r, r, &tmpa);
}
-#endif
/* remaining loop iterations */
for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
int n;
@@ -215,14 +205,12 @@
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
}
-#endif
}
secp256k1_fe_mul(&r->z, &r->z, &Z);
@@ -231,43 +219,35 @@
/* Correct for wNAF skew */
secp256k1_ge correction = *a;
secp256k1_ge_storage correction_1_stor;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge_storage correction_lam_stor;
-#endif
secp256k1_ge_storage a2_stor;
secp256k1_gej tmpj;
secp256k1_gej_set_ge(&tmpj, &correction);
secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
secp256k1_ge_set_gej(&correction, &tmpj);
secp256k1_ge_to_storage(&correction_1_stor, a);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_to_storage(&correction_lam_stor, a);
}
-#endif
secp256k1_ge_to_storage(&a2_stor, &correction);
/* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
}
-#endif
/* Apply the correction */
secp256k1_ge_from_storage(&correction, &correction_1_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
-#ifdef USE_ENDOMORPHISM
if (size > 128) {
secp256k1_ge_from_storage(&correction, &correction_lam_stor);
secp256k1_ge_neg(&correction, &correction);
secp256k1_ge_mul_lambda(&correction, &correction);
secp256k1_gej_add_ge(r, r, &correction);
}
-#endif
}
}
diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h
--- a/src/secp256k1/src/ecmult_impl.h
+++ b/src/secp256k1/src/ecmult_impl.h
@@ -38,8 +38,8 @@
* (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
* where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
* be larger due to platform-specific padding and alignment.
- * If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM)
- * two tables of this size are used instead of only one.
+ * Two tables of this size are used (due to the endomorphism
+ * optimization).
*/
# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
@@ -59,11 +59,7 @@
# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif
-#ifdef USE_ENDOMORPHISM
- #define WNAF_BITS 128
-#else
- #define WNAF_BITS 256
-#endif
+#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
@@ -77,17 +73,9 @@
#define PIPPENGER_MAX_BUCKET_WINDOW 12
/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
-#ifdef USE_ENDOMORPHISM
- #define ECMULT_PIPPENGER_THRESHOLD 88
-#else
- #define ECMULT_PIPPENGER_THRESHOLD 160
-#endif
+#define ECMULT_PIPPENGER_THRESHOLD 88
-#ifdef USE_ENDOMORPHISM
- #define ECMULT_MAX_POINTS_PER_BATCH 5000000
-#else
- #define ECMULT_MAX_POINTS_PER_BATCH 10000000
-#endif
+#define ECMULT_MAX_POINTS_PER_BATCH 5000000
/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
* the values [1*a,3*a,...,(2*n-1)*a], so it space for n values. zr[0] will
@@ -313,16 +301,12 @@
static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
-#ifdef USE_ENDOMORPHISM
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
-#endif
;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
-#ifdef USE_ENDOMORPHISM
ctx->pre_g_128 = NULL;
-#endif
}
static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
@@ -347,7 +331,6 @@
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
-#ifdef USE_ENDOMORPHISM
{
secp256k1_gej g_128j;
int i;
@@ -364,7 +347,6 @@
}
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
}
-#endif
}
static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
@@ -372,11 +354,9 @@
/* We cast to void* first to suppress a -Wcast-align warning. */
dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
-#ifdef USE_ENDOMORPHISM
if (src->pre_g_128 != NULL) {
dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
-#endif
}
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
@@ -447,16 +427,11 @@
}
struct secp256k1_strauss_point_state {
-#ifdef USE_ENDOMORPHISM
secp256k1_scalar na_1, na_lam;
int wnaf_na_1[130];
int wnaf_na_lam[130];
int bits_na_1;
int bits_na_lam;
-#else
- int wnaf_na[256];
- int bits_na;
-#endif
size_t input_pos;
};
@@ -464,26 +439,19 @@
secp256k1_gej* prej;
secp256k1_fe* zr;
secp256k1_ge* pre_a;
-#ifdef USE_ENDOMORPHISM
secp256k1_ge* pre_a_lam;
-#endif
struct secp256k1_strauss_point_state* ps;
};
static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
secp256k1_ge tmpa;
secp256k1_fe Z;
-#ifdef USE_ENDOMORPHISM
/* Splitted G factors. */
secp256k1_scalar ng_1, ng_128;
int wnaf_ng_1[129];
int bits_ng_1 = 0;
int wnaf_ng_128[129];
int bits_ng_128 = 0;
-#else
- int wnaf_ng[256];
- int bits_ng = 0;
-#endif
int i;
int bits = 0;
int np;
@@ -494,7 +462,6 @@
continue;
}
state->ps[no].input_pos = np;
-#ifdef USE_ENDOMORPHISM
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);
@@ -509,13 +476,6 @@
if (state->ps[no].bits_na_lam > bits) {
bits = state->ps[no].bits_na_lam;
}
-#else
- /* build wnaf representation for na. */
- state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A);
- if (state->ps[no].bits_na > bits) {
- bits = state->ps[no].bits_na;
- }
-#endif
++no;
}
@@ -547,7 +507,6 @@
secp256k1_fe_set_int(&Z, 1);
}
-#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
@@ -568,21 +527,12 @@
bits = bits_ng_128;
}
}
-#else
- if (ng) {
- bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
- if (bits_ng > bits) {
- bits = bits_ng;
- }
- }
-#endif
secp256k1_gej_set_infinity(r);
for (i = bits - 1; i >= 0; i--) {
int n;
secp256k1_gej_double_var(r, r, NULL);
-#ifdef USE_ENDOMORPHISM
for (np = 0; np < no; ++np) {
if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
@@ -601,18 +551,6 @@
ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
}
-#else
- for (np = 0; np < no; ++np) {
- if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) {
- ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
- secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
- }
- }
- if (i < bits_ng && (n = wnaf_ng[i])) {
- ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
- secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
- }
-#endif
}
if (!r->infinity) {
@@ -625,27 +563,19 @@
secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
struct secp256k1_strauss_point_state ps[1];
-#ifdef USE_ENDOMORPHISM
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
-#endif
struct secp256k1_strauss_state state;
state.prej = prej;
state.zr = zr;
state.pre_a = pre_a;
-#ifdef USE_ENDOMORPHISM
state.pre_a_lam = pre_a_lam;
-#endif
state.ps = ps;
secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
}
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
-#ifdef USE_ENDOMORPHISM
static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
-#else
- static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
-#endif
return n_points*point_size;
}
@@ -665,12 +595,8 @@
scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
-#ifdef USE_ENDOMORPHISM
state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
-#else
- state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
-#endif
state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
@@ -868,7 +794,6 @@
* set of buckets) for a given number of points.
*/
static int secp256k1_pippenger_bucket_window(size_t n) {
-#ifdef USE_ENDOMORPHISM
if (n <= 1) {
return 1;
} else if (n <= 4) {
@@ -892,33 +817,6 @@
} else {
return PIPPENGER_MAX_BUCKET_WINDOW;
}
-#else
- if (n <= 1) {
- return 1;
- } else if (n <= 11) {
- return 2;
- } else if (n <= 45) {
- return 3;
- } else if (n <= 100) {
- return 4;
- } else if (n <= 275) {
- return 5;
- } else if (n <= 625) {
- return 6;
- } else if (n <= 1850) {
- return 7;
- } else if (n <= 3400) {
- return 8;
- } else if (n <= 9630) {
- return 9;
- } else if (n <= 17900) {
- return 10;
- } else if (n <= 32800) {
- return 11;
- } else {
- return PIPPENGER_MAX_BUCKET_WINDOW;
- }
-#endif
}
/**
@@ -926,7 +824,6 @@
*/
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
switch(bucket_window) {
-#ifdef USE_ENDOMORPHISM
case 1: return 1;
case 2: return 4;
case 3: return 20;
@@ -939,26 +836,11 @@
case 10: return 7880;
case 11: return 16050;
case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
-#else
- case 1: return 1;
- case 2: return 11;
- case 3: return 45;
- case 4: return 100;
- case 5: return 275;
- case 6: return 625;
- case 7: return 1850;
- case 8: return 3400;
- case 9: return 9630;
- case 10: return 17900;
- case 11: return 32800;
- case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
-#endif
}
return 0;
}
-#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
secp256k1_scalar tmp = *s1;
secp256k1_scalar_split_lambda(s1, s2, &tmp);
@@ -973,32 +855,23 @@
secp256k1_ge_neg(p2, p2);
}
}
-#endif
/**
* Returns the scratch size required for a given number of points (excluding
* base point G) without considering alignment.
*/
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
-#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
-#else
- size_t entries = n_points + 1;
-#endif
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
- /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
+ /* Use 2(n+1) with the endomorphism, when calculating batch
* sizes. The reason for +1 is that we add the G scalar to the list of
* other scalars. */
-#ifdef USE_ENDOMORPHISM
size_t entries = 2*n_points + 2;
-#else
- size_t entries = n_points + 1;
-#endif
secp256k1_ge *points;
secp256k1_scalar *scalars;
secp256k1_gej *buckets;
@@ -1035,10 +908,8 @@
scalars[0] = *inp_g_sc;
points[0] = secp256k1_ge_const_g;
idx++;
-#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
idx++;
-#endif
}
while (point_idx < n_points) {
@@ -1047,10 +918,8 @@
return 0;
}
idx++;
-#ifdef USE_ENDOMORPHISM
secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
idx++;
-#endif
point_idx++;
}
@@ -1093,9 +962,7 @@
size_t space_overhead;
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
-#ifdef USE_ENDOMORPHISM
entry_size = 2*entry_size;
-#endif
space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
if (space_overhead > max_alloc) {
break;
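Halving ECMULT_PIPPENGER_THRESHOLD (160 to 88) and ECMULT_MAX_POINTS_PER_BATCH (10000000 to 5000000) accounts for the endomorphism split turning every input point into two half-width entries. A simplified, hypothetical sketch of the resulting crossover (the real secp256k1_ecmult_multi_var dispatch also weighs available scratch space and batching):

#include <stdio.h>
#include <stddef.h>

#define ECMULT_PIPPENGER_THRESHOLD 88

/* Hypothetical helper illustrating the crossover implied by the threshold;
 * not the actual dispatch code in secp256k1_ecmult_multi_var. */
static const char *ecmult_multi_algorithm(size_t n_points) {
    return n_points >= ECMULT_PIPPENGER_THRESHOLD ? "pippenger_wnaf"
                                                  : "strauss_wnaf";
}

int main(void) {
    printf("20 points  -> %s\n", ecmult_multi_algorithm(20));
    printf("500 points -> %s\n", ecmult_multi_algorithm(500));
    return 0;
}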
diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h
--- a/src/secp256k1/src/group.h
+++ b/src/secp256k1/src/group.h
@@ -116,10 +116,8 @@
/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv);
-#ifdef USE_ENDOMORPHISM
/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a);
-#endif
/** Clear a secp256k1_gej to prevent leaking sensitive information. */
static void secp256k1_gej_clear(secp256k1_gej *r);
diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h
--- a/src/secp256k1/src/group_impl.h
+++ b/src/secp256k1/src/group_impl.h
@@ -646,7 +646,6 @@
secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}
-#ifdef USE_ENDOMORPHISM
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
static const secp256k1_fe beta = SECP256K1_FE_CONST(
0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
@@ -655,7 +654,6 @@
*r = *a;
secp256k1_fe_mul(&r->x, &r->x, &beta);
}
-#endif
static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) {
secp256k1_fe yz;
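The beta constant in secp256k1_ge_mul_lambda above is a non-trivial cube root of unity in the base field, which is why scaling only the x coordinate lands back on the curve: from beta^3 = 1 (mod p) it follows that (beta*x)^3 + 7 = x^3 + 7 = y^2, so (beta*x, y) is a valid point, and it equals lambda*(x, y) for the lambda used by secp256k1_scalar_split_lambda.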
diff --git a/src/secp256k1/src/libsecp256k1-config.h.cmake.in b/src/secp256k1/src/libsecp256k1-config.h.cmake.in
--- a/src/secp256k1/src/libsecp256k1-config.h.cmake.in
+++ b/src/secp256k1/src/libsecp256k1-config.h.cmake.in
@@ -22,7 +22,6 @@
#cmakedefine USE_ASM_X86_64
#cmakedefine USE_EXTERNAL_ASM
-#cmakedefine USE_ENDOMORPHISM
#cmakedefine USE_EXTERNAL_DEFAULT_CALLBACKS
#cmakedefine USE_ECMULT_STATIC_PRECOMPUTATION
diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h
--- a/src/secp256k1/src/scalar.h
+++ b/src/secp256k1/src/scalar.h
@@ -102,13 +102,11 @@
/** Compare two scalars. */
static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
-#ifdef USE_ENDOMORPHISM
/** Find r1 and r2 such that r1+r2*2^128 = k. */
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
/** Find r1 and r2 such that r1+r2*lambda = k,
* where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
-#endif
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h
--- a/src/secp256k1/src/scalar_4x64_impl.h
+++ b/src/secp256k1/src/scalar_4x64_impl.h
@@ -912,7 +912,6 @@
secp256k1_scalar_reduce_512(r, l);
}
-#ifdef USE_ENDOMORPHISM
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
@@ -923,7 +922,6 @@
r2->d[2] = 0;
r2->d[3] = 0;
}
-#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
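The now-unconditional secp256k1_scalar_split_128 above is pure limb surgery: in the 4x64 representation k = d[0] + d[1]*2^64 + d[2]*2^128 + d[3]*2^192, so copying the low two limbs into r1 and the high two into r2 gives exactly r1 + r2*2^128 = k with no modular reduction needed; the 8x32 variant in the next file does the same with eight 32-bit limbs.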
diff --git a/src/secp256k1/src/scalar_8x32_impl.h b/src/secp256k1/src/scalar_8x32_impl.h
--- a/src/secp256k1/src/scalar_8x32_impl.h
+++ b/src/secp256k1/src/scalar_8x32_impl.h
@@ -672,7 +672,6 @@
secp256k1_scalar_reduce_512(r, l);
}
-#ifdef USE_ENDOMORPHISM
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
@@ -691,7 +690,6 @@
r2->d[6] = 0;
r2->d[7] = 0;
}
-#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h
--- a/src/secp256k1/src/scalar_impl.h
+++ b/src/secp256k1/src/scalar_impl.h
@@ -256,7 +256,6 @@
#endif
}
-#ifdef USE_ENDOMORPHISM
/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13
@@ -508,6 +507,5 @@
#endif
}
#endif
-#endif
#endif /* SECP256K1_SCALAR_IMPL_H */
diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c
--- a/src/secp256k1/src/tests.c
+++ b/src/secp256k1/src/tests.c
@@ -2102,17 +2102,12 @@
void test_ge(void) {
int i, i1;
-#ifdef USE_ENDOMORPHISM
int runs = 6;
-#else
- int runs = 4;
-#endif
- /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4).
- * The second in each pair of identical points uses a random Z coordinate in the Jacobian form.
- * All magnitudes are randomized.
- * All 17*17 combinations of points are added to each other, using all applicable methods.
- *
- * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well.
+ /* 25 points are used:
+ * - infinity
+ * - for each of four random points p1 p2 p3 p4, we add the point, its
+ * negation, and then those two again but with randomized Z coordinate.
+ * - The same is then done for lambda*p1 and lambda^2*p1.
*/
secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs));
secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs));
@@ -2127,14 +2122,12 @@
int j;
secp256k1_ge g;
random_group_element_test(&g);
-#ifdef USE_ENDOMORPHISM
if (i >= runs - 2) {
secp256k1_ge_mul_lambda(&g, &ge[1]);
}
if (i >= runs - 1) {
secp256k1_ge_mul_lambda(&g, &g);
}
-#endif
ge[1 + 4 * i] = g;
ge[2 + 4 * i] = g;
secp256k1_ge_neg(&ge[3 + 4 * i], &g);
@@ -3121,12 +3114,10 @@
CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0);
for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) {
-#ifdef USE_ENDOMORPHISM
/* Bucket_window of 8 is not used with endo */
if (i == 8) {
continue;
}
-#endif
CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i);
if (i != PIPPENGER_MAX_BUCKET_WINDOW) {
CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i);
@@ -3369,13 +3360,10 @@
secp256k1_scalar_set_int(&x, 0);
secp256k1_scalar_set_int(&shift, 1 << w);
- /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
-#ifdef USE_ENDOMORPHISM
for (i = 0; i < 16; ++i) {
secp256k1_scalar_shr_int(&num, 8);
}
bits = 128;
-#endif
skew = secp256k1_wnaf_const(wnaf, &num, w, bits);
for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
@@ -3410,12 +3398,9 @@
secp256k1_scalar_set_int(&x, 0);
secp256k1_scalar_set_int(&shift, 1 << w);
- /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
-#ifdef USE_ENDOMORPHISM
for (i = 0; i < 16; ++i) {
secp256k1_scalar_shr_int(&num, 8);
}
-#endif
skew = secp256k1_wnaf_fixed(wnaf, &num, w);
for (i = WNAF_SIZE(w)-1; i >= 0; --i) {
@@ -3631,7 +3616,6 @@
}
}
-#ifdef USE_ENDOMORPHISM
/***** ENDOMORPHISH TESTS *****/
void test_scalar_split(const secp256k1_scalar* full) {
secp256k1_scalar s, s1, slam;
@@ -3680,7 +3664,6 @@
test_scalar_split(&scalars_near_split_bounds[i]);
}
}
-#endif
void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) {
unsigned char pubkeyc[65];
@@ -5707,9 +5690,7 @@
run_ec_combine();
/* endomorphism tests */
-#ifdef USE_ENDOMORPHISM
run_endomorphism_tests();
-#endif
/* EC point parser test */
run_ec_pubkey_parse_test();
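The rewritten test_ge comment above agrees with the allocations that follow it: with runs fixed at 6, the ge/gej arrays hold 1 + 4*runs = 25 elements, namely infinity plus four variants (the point, its negation, and both again with randomized Z coordinate) of each of six base points, the last two base points being lambda*p1 and lambda^2*p1.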
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
--- a/src/secp256k1/src/tests_exhaustive.c
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -95,7 +95,6 @@
return 1;
}
-#ifdef USE_ENDOMORPHISM
void test_exhaustive_endomorphism(const secp256k1_ge *group) {
int i;
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
@@ -104,7 +103,6 @@
ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
}
}
-#endif
void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) {
int i, j;
@@ -429,9 +427,7 @@
}
/* Run the tests */
-#ifdef USE_ENDOMORPHISM
test_exhaustive_endomorphism(group);
-#endif
test_exhaustive_addition(group, groupj);
test_exhaustive_ecmult(ctx, group, groupj);
test_exhaustive_ecmult_multi(ctx, group);
diff --git a/src/secp256k1/travis/build_autotools.sh b/src/secp256k1/travis/build_autotools.sh
--- a/src/secp256k1/travis/build_autotools.sh
+++ b/src/secp256k1/travis/build_autotools.sh
@@ -36,7 +36,6 @@
../configure \
--enable-experimental=$EXPERIMENTAL \
- --enable-endomorphism=$ENDOMORPHISM \
--with-test-override-wide-multiply=$WIDEMUL \
--with-bignum=$BIGNUM \
--with-asm=$ASM \
diff --git a/src/secp256k1/travis/build_cmake.sh b/src/secp256k1/travis/build_cmake.sh
--- a/src/secp256k1/travis/build_cmake.sh
+++ b/src/secp256k1/travis/build_cmake.sh
@@ -45,7 +45,6 @@
-DSECP256K1_ENABLE_MODULE_EXTRAKEYS=$SCHNORRSIG \
-DSECP256K1_ENABLE_MODULE_SCHNORRSIG=$SCHNORRSIG \
-DSECP256K1_ENABLE_JNI=$JNI \
- -DSECP256K1_ENABLE_ENDOMORPHISM=$ENDOMORPHISM \
-DSECP256K1_ENABLE_BIGNUM=$BIGNUM \
-DSECP256K1_USE_ASM=$ASM \
-DSECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY=$WIDEMUL \