diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt index df6d95885..4d91494b7 100644 --- a/src/secp256k1/CMakeLists.txt +++ b/src/secp256k1/CMakeLists.txt @@ -1,242 +1,246 @@ # Copyright (c) 2017 The Bitcoin developers cmake_minimum_required(VERSION 3.13) project(secp256k1 LANGUAGES C) # Add path for custom modules when building as a standalone project list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules) include(AddCompilerFlags) # libsecp256k1 use a different set of flags. add_c_compiler_flags( -pedantic -Wall -Wextra -Wcast-align -Wshadow -Wno-unused-function -Wno-overlength-strings -std=c89 -Wnested-externs -Wstrict-prototypes -Wno-long-long ) # Default visibility is hidden on all targets. set(CMAKE_C_VISIBILITY_PRESET hidden) include_directories( . src # For the config ${CMAKE_CURRENT_BINARY_DIR}/src ) # The library add_library(secp256k1 src/secp256k1.c) target_include_directories(secp256k1 PUBLIC include) # We need to link in GMP find_package(GMP) if(GMP_FOUND) target_include_directories(secp256k1 PUBLIC ${GMP_INCLUDE_DIR}) target_link_libraries(secp256k1 ${GMP_LIBRARY}) set(USE_NUM_GMP 1) set(USE_FIELD_INV_NUM 1) set(USE_SCALAR_INV_NUM 1) else() set(USE_NUM_NONE 1) set(USE_FIELD_INV_BUILTIN 1) set(USE_SCALAR_INV_BUILTIN 1) endif() # We check if amd64 asm is supported. check_c_source_compiles(" #include int main() { uint64_t a = 11, tmp; __asm__ __volatile__(\"movq \$0x100000000,%1; mulq %%rsi\" : \"+a\"(a) : \"S\"(tmp) : \"cc\", \"%rdx\"); return 0; } " USE_ASM_X86_64) # We make sure __int128 is defined include(CheckTypeSize) check_type_size(__int128 SIZEOF___INT128) if(SIZEOF___INT128 EQUAL 16) set(HAVE___INT128 1) else() # If we do not support __int128, we should be falling back # on 32bits implementations for field and scalar. endif() # Detect if we are on a 32 or 64 bits platform and chose # scalar and field implementation accordingly. if(CMAKE_SIZEOF_VOID_P EQUAL 8) # 64 bits implementation require either __int128 or asm support. if (HAVE___INT128 OR USE_ASM_X86_64) set(USE_SCALAR_4X64 1) set(USE_FIELD_5X52 1) else() message(SEND_ERROR "Compiler does not support __int128 or inline assembly") endif() else() set(USE_SCALAR_8X32 1) set(USE_FIELD_10X26 1) endif() # Executable internal to secp256k1 need to have the HAVE_CONFIG_H define set. # For convenience, we wrap this into a function. 
function(link_secp256k1_internal NAME) target_link_libraries(${NAME} secp256k1) target_compile_definitions(${NAME} PRIVATE HAVE_CONFIG_H SECP256K1_BUILD) endfunction(link_secp256k1_internal) # Phony target to build benchmarks add_custom_target(bench-secp256k1) function(add_secp256k1_bench NAME) set(EXECUTABLE_NAME "${NAME}-bench") add_executable(${EXECUTABLE_NAME} EXCLUDE_FROM_ALL ${ARGN}) link_secp256k1_internal(${EXECUTABLE_NAME}) set(BENCH_NAME "bench-secp256k1-${NAME}") add_custom_target(${BENCH_NAME} COMMAND ${EXECUTABLE_NAME} USES_TERMINAL) add_dependencies(bench-secp256k1 ${BENCH_NAME}) endfunction(add_secp256k1_bench) # ECDH module option(SECP256K1_ENABLE_MODULE_ECDH "Build libsecp256k1's ECDH module" OFF) if(SECP256K1_ENABLE_MODULE_ECDH) set(ENABLE_MODULE_ECDH 1) add_secp256k1_bench(ecdh src/bench_ecdh.c) endif() # MultiSet module option(SECP256K1_ENABLE_MODULE_MULTISET "Build libsecp256k1's MULTISET module" ON) if(SECP256K1_ENABLE_MODULE_MULTISET) set(ENABLE_MODULE_MULTISET 1) add_secp256k1_bench(multiset src/bench_multiset.c) endif() # Recovery module option(SECP256K1_ENABLE_MODULE_RECOVERY "Build libsecp256k1's recovery module" ON) if(SECP256K1_ENABLE_MODULE_RECOVERY) set(ENABLE_MODULE_RECOVERY 1) add_secp256k1_bench(recover src/bench_recover.c) endif() # Schnorr module option(SECP256K1_ENABLE_MODULE_SCHNORR "Build libsecp256k1's Schnorr module" ON) if(SECP256K1_ENABLE_MODULE_SCHNORR) set(ENABLE_MODULE_SCHNORR 1) endif() +# Make the ecmult window size customizable. +set(SECP256K1_ECMULT_WINDOW_SIZE 15 CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24].") + # Static precomputation for elliptic curve multiplication option(SECP256K1_ECMULT_STATIC_PRECOMPUTATION "Precompute libsecp256k1's elliptic curve multiplication tables" ON) if(SECP256K1_ECMULT_STATIC_PRECOMPUTATION) set(USE_ECMULT_STATIC_PRECOMPUTATION 1) - set(SECP256K1_ECMULT_WINDOW_SIZE 15 CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24].") if(${SECP256K1_ECMULT_WINDOW_SIZE} LESS 2 OR ${SECP256K1_ECMULT_WINDOW_SIZE} GREATER 24) message(FATAL_ERROR "SECP256K1_ECMULT_WINDOW_SIZE must be an integer in range [2..24]") endif() - # FIXME: Propagate SECP256K1_ECMULT_WINDOW_SIZE include(NativeExecutable) + native_add_cmake_flags( + "-DSECP256K1_ECMULT_WINDOW_SIZE=${SECP256K1_ECMULT_WINDOW_SIZE}" + ) add_native_executable(gen_context src/gen_context.c) add_custom_command( OUTPUT src/ecmult_static_context.h COMMAND gen_context ) target_sources(secp256k1 PRIVATE src/ecmult_static_context.h) endif() # Generate the config configure_file(src/libsecp256k1-config.h.cmake.in src/libsecp256k1-config.h ESCAPE_QUOTES) target_compile_definitions(secp256k1 PRIVATE HAVE_CONFIG_H SECP256K1_BUILD) # Build the Java binding option(SECP256K1_ENABLE_JNI "Enable the Java Native Interface binding" OFF) if(SECP256K1_ENABLE_JNI) if(NOT SECP256K1_ENABLE_MODULE_ECDH) message(FATAL_ERROR "The secp256k1 JNI support requires ECDH. Try again with -DSECP256K1_ENABLE_MODULE_ECDH=ON.") endif() find_package(Java REQUIRED) find_package(JNI REQUIRED) include(UseJava) add_library(secp256k1_jni SHARED src/java/org_bitcoin_NativeSecp256k1.c src/java/org_bitcoin_Secp256k1Context.c ) target_include_directories(secp256k1_jni PUBLIC ${JNI_INCLUDE_DIRS}) # As per CMake documentation: the POSITION_INDEPENDENT_CODE property is set # when a target is created. It defaults to True for SHARED or MODULE library # targets and False otherwise. 
# The secp256ki_jni library being shared, the property is set and it will # build with PIC enabled. But the secp256k1 dependency might not have the # property set, so it's associated source files won't be built with PIC # enabled. That would cause the linker to fail. # Forcing the property for the secp256k1 library fixes the issue. set_target_properties(secp256k1 PROPERTIES POSITION_INDEPENDENT_CODE ON) link_secp256k1_internal(secp256k1_jni) endif() # Tests option(SECP256K1_BUILD_TEST "Build secp256k1's unit tests" ON) if(SECP256K1_BUILD_TEST) include(TestSuite) create_test_suite(secp256k1) function(create_secp256k1_test NAME FILES) add_test_to_suite(secp256k1 ${NAME} EXCLUDE_FROM_ALL ${FILES}) link_secp256k1_internal(${NAME}) endfunction() create_secp256k1_test(tests src/tests.c) target_compile_definitions(tests PRIVATE VERIFY) create_secp256k1_test(exhaustive_tests src/tests_exhaustive.c) # This should not be enabled at the same time as coverage is. # TODO: support coverage. target_compile_definitions(exhaustive_tests PRIVATE VERIFY) if(SECP256K1_ENABLE_JNI) set(SECP256k1_JNI_TEST_JAR "secp256k1-jni-test") set(CMAKE_JNI_TARGET TRUE) add_jar(secp256k1-jni-test-jar SOURCES src/java/org/bitcoin/NativeSecp256k1.java src/java/org/bitcoin/NativeSecp256k1Test.java src/java/org/bitcoin/NativeSecp256k1Util.java src/java/org/bitcoin/Secp256k1Context.java ENTRY_POINT org/bitcoin/NativeSecp256k1Test OUTPUT_NAME "${SECP256k1_JNI_TEST_JAR}" ) add_dependencies(secp256k1-jni-test-jar secp256k1_jni) add_custom_target(check-secp256k1-java COMMAND "${Java_JAVA_EXECUTABLE}" "-Djava.library.path=${CMAKE_CURRENT_BINARY_DIR}" "-jar" "${SECP256k1_JNI_TEST_JAR}.jar" WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" ) add_dependencies(check-secp256k1-java secp256k1-jni-test-jar) add_dependencies(check-secp256k1 check-secp256k1-java) endif() endif(SECP256K1_BUILD_TEST) # Benchmarks add_secp256k1_bench(verify src/bench_verify.c) add_secp256k1_bench(sign src/bench_sign.c) add_secp256k1_bench(internal src/bench_internal.c) add_secp256k1_bench(ecmult src/bench_ecmult.c) diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac index 62c787b9a..1538b2206 100644 --- a/src/secp256k1/configure.ac +++ b/src/secp256k1/configure.ac @@ -1,546 +1,578 @@ AC_PREREQ([2.60]) AC_INIT([libsecp256k1],[0.1]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([build-aux/m4]) AC_CANONICAL_HOST AH_TOP([#ifndef LIBSECP256K1_CONFIG_H]) AH_TOP([#define LIBSECP256K1_CONFIG_H]) AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/]) AM_INIT_AUTOMAKE([foreign subdir-objects]) LT_INIT dnl make the compilation flags quiet unless V=1 is used m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) PKG_PROG_PKG_CONFIG AC_PATH_TOOL(AR, ar) AC_PATH_TOOL(RANLIB, ranlib) AC_PATH_TOOL(STRIP, strip) AX_PROG_CC_FOR_BUILD if test "x$CFLAGS" = "x"; then CFLAGS="-g" fi AM_PROG_CC_C_O AC_PROG_CC_C89 if test x"$ac_cv_prog_cc_c89" = x"no"; then AC_MSG_ERROR([c89 compiler support required]) fi AM_PROG_AS case $host_os in *darwin*) if test x$cross_compiling != xyes; then AC_PATH_PROG([BREW],brew,) if test x$BREW != x; then dnl These Homebrew packages may be keg-only, meaning that they won't be found dnl in expected paths because they may conflict with system files. Ask dnl Homebrew where each one is located, then adjust paths accordingly. 
openssl_prefix=`$BREW --prefix openssl 2>/dev/null` gmp_prefix=`$BREW --prefix gmp 2>/dev/null` if test x$openssl_prefix != x; then PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" export PKG_CONFIG_PATH fi if test x$gmp_prefix != x; then GMP_CPPFLAGS="-I$gmp_prefix/include" GMP_LIBS="-L$gmp_prefix/lib" fi else AC_PATH_PROG([PORT],port,) dnl if homebrew isn't installed and macports is, add the macports default paths dnl as a last resort. if test x$PORT != x; then CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" LDFLAGS="$LDFLAGS -L/opt/local/lib" fi fi fi ;; esac CFLAGS="$CFLAGS -W" warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings" saved_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $warn_CFLAGS" AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}]) AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], [ AC_MSG_RESULT([yes]) ], [ AC_MSG_RESULT([no]) CFLAGS="$saved_CFLAGS" ]) saved_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -fvisibility=hidden" AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden]) AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], [ AC_MSG_RESULT([yes]) ], [ AC_MSG_RESULT([no]) CFLAGS="$saved_CFLAGS" ]) AC_ARG_ENABLE(benchmark, - AS_HELP_STRING([--enable-benchmark],[compile benchmark (default is yes)]), + AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]), [use_benchmark=$enableval], [use_benchmark=yes]) AC_ARG_ENABLE(coverage, - AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis]), + AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]), [enable_coverage=$enableval], [enable_coverage=no]) AC_ARG_ENABLE(tests, - AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]), + AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]), [use_tests=$enableval], [use_tests=yes]) AC_ARG_ENABLE(openssl_tests, - AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests, if OpenSSL is available (default is auto)]), + AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests [default=auto]]), [enable_openssl_tests=$enableval], [enable_openssl_tests=auto]) AC_ARG_ENABLE(experimental, - AS_HELP_STRING([--enable-experimental],[allow experimental configure options (default is no)]), + AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]), [use_experimental=$enableval], [use_experimental=no]) AC_ARG_ENABLE(exhaustive_tests, - AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]), + AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]), [use_exhaustive_tests=$enableval], [use_exhaustive_tests=yes]) AC_ARG_ENABLE(endomorphism, - AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]), + AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]), [use_endomorphism=$enableval], [use_endomorphism=no]) AC_ARG_ENABLE(ecmult_static_precomputation, - AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing (default is yes)]), + AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]), [use_ecmult_static_precomputation=$enableval], [use_ecmult_static_precomputation=auto]) AC_ARG_ENABLE(module_ecdh, AS_HELP_STRING([--enable-module-ecdh],[enable ECDH shared secret computation (experimental)]), [enable_module_ecdh=$enableval], 
[enable_module_ecdh=no]) AC_ARG_ENABLE(module_multiset, AS_HELP_STRING([--enable-module-multiset],[enable multiset operations (experimental)]), [enable_module_multiset=$enableval], [enable_module_multiset=no]) AC_ARG_ENABLE(module_recovery, - AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]), + AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]), [enable_module_recovery=$enableval], [enable_module_recovery=no]) AC_ARG_ENABLE(module_schnorr, - AS_HELP_STRING([--enable-module-schnorr],[enable Schnorr signatures module (default is yes)]), + AS_HELP_STRING([--enable-module-schnorr],[enable Schnorr signatures module [default=yes]]), [enable_module_schnorr=$enableval], [enable_module_schnorr=yes]) AC_ARG_ENABLE(jni, - AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni (default is no)]), + AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]), [use_jni=$enableval], [use_jni=no]) AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto], -[Specify Field Implementation. Default is auto])],[req_field=$withval], [req_field=auto]) +[finite field implementation to use [default=auto]])],[req_field=$withval], [req_field=auto]) AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto], -[Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto]) +[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto]) AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto], -[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto]) +[scalar implementation to use [default=auto]])],[req_scalar=$withval], [req_scalar=auto]) -AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto] -[Specify assembly optimizations to use. Default is auto (experimental: arm)])],[req_asm=$withval], [req_asm=auto]) +AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto], +[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto]) + +AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto], +[window size for ecmult precomputation for verification, specified as integer in range [2..24].] +[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.] +[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.] +[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.] +["auto" is a reasonable setting for desktop machines (currently 15). 
[default=auto]] +)], +[req_ecmult_window=$withval], [req_ecmult_window=auto]) AC_CHECK_TYPES([__int128]) if test x"$enable_coverage" = x"yes"; then AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code]) CFLAGS="$CFLAGS -O0 --coverage" LDFLAGS="$LDFLAGS --coverage" else CFLAGS="$CFLAGS -O3" fi if test x"$use_ecmult_static_precomputation" != x"no"; then # Temporarily switch to an environment for the native compiler save_cross_compiling=$cross_compiling cross_compiling=no SAVE_CC="$CC" CC="$CC_FOR_BUILD" SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS_FOR_BUILD" SAVE_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS_FOR_BUILD" SAVE_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS_FOR_BUILD" warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function" saved_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $warn_CFLAGS_FOR_BUILD" AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}]) AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], [ AC_MSG_RESULT([yes]) ], [ AC_MSG_RESULT([no]) CFLAGS="$saved_CFLAGS" ]) AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}]) AC_RUN_IFELSE( [AC_LANG_PROGRAM([], [])], [working_native_cc=yes], [working_native_cc=no],[dnl]) CFLAGS_FOR_BUILD="$CFLAGS" # Restore the environment cross_compiling=$save_cross_compiling CC="$SAVE_CC" CFLAGS="$SAVE_CFLAGS" CPPFLAGS="$SAVE_CPPFLAGS" LDFLAGS="$SAVE_LDFLAGS" if test x"$working_native_cc" = x"no"; then AC_MSG_RESULT([no]) set_precomp=no m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) if test x"$use_ecmult_static_precomputation" = x"yes"; then AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) else AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. 
please_set_for_build]) fi else AC_MSG_RESULT([yes]) set_precomp=yes fi else set_precomp=no fi if test x"$req_asm" = x"auto"; then SECP_64BIT_ASM_CHECK if test x"$has_64bit_asm" = x"yes"; then set_asm=x86_64 fi if test x"$set_asm" = x; then set_asm=no fi else set_asm=$req_asm case $set_asm in x86_64) SECP_64BIT_ASM_CHECK if test x"$has_64bit_asm" != x"yes"; then AC_MSG_ERROR([x86_64 assembly optimization requested but not available]) fi ;; arm) ;; no) ;; *) AC_MSG_ERROR([invalid assembly optimization selection]) ;; esac fi if test x"$req_field" = x"auto"; then if test x"set_asm" = x"x86_64"; then set_field=64bit fi if test x"$set_field" = x; then SECP_INT128_CHECK if test x"$has_int128" = x"yes"; then set_field=64bit fi fi if test x"$set_field" = x; then set_field=32bit fi else set_field=$req_field case $set_field in 64bit) if test x"$set_asm" != x"x86_64"; then SECP_INT128_CHECK if test x"$has_int128" != x"yes"; then AC_MSG_ERROR([64bit field explicitly requested but neither __int128 support or x86_64 assembly available]) fi fi ;; 32bit) ;; *) AC_MSG_ERROR([invalid field implementation selection]) ;; esac fi if test x"$req_scalar" = x"auto"; then SECP_INT128_CHECK if test x"$has_int128" = x"yes"; then set_scalar=64bit fi if test x"$set_scalar" = x; then set_scalar=32bit fi else set_scalar=$req_scalar case $set_scalar in 64bit) SECP_INT128_CHECK if test x"$has_int128" != x"yes"; then AC_MSG_ERROR([64bit scalar explicitly requested but __int128 support not available]) fi ;; 32bit) ;; *) AC_MSG_ERROR([invalid scalar implementation selected]) ;; esac fi if test x"$req_bignum" = x"auto"; then SECP_GMP_CHECK if test x"$has_gmp" = x"yes"; then set_bignum=gmp fi if test x"$set_bignum" = x; then set_bignum=no fi else set_bignum=$req_bignum case $set_bignum in gmp) SECP_GMP_CHECK if test x"$has_gmp" != x"yes"; then AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available]) fi ;; no) ;; *) AC_MSG_ERROR([invalid bignum implementation selection]) ;; esac fi # select assembly optimization use_external_asm=no case $set_asm in x86_64) AC_DEFINE(USE_ASM_X86_64, 1, [Define this symbol to enable x86_64 assembly optimizations]) ;; arm) use_external_asm=yes ;; no) ;; *) AC_MSG_ERROR([invalid assembly optimizations]) ;; esac # select field implementation case $set_field in 64bit) AC_DEFINE(USE_FIELD_5X52, 1, [Define this symbol to use the FIELD_5X52 implementation]) ;; 32bit) AC_DEFINE(USE_FIELD_10X26, 1, [Define this symbol to use the FIELD_10X26 implementation]) ;; *) AC_MSG_ERROR([invalid field implementation]) ;; esac # select bignum implementation case $set_bignum in gmp) AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed]) AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num]) AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation]) AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation]) ;; no) AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation]) AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation]) AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation]) ;; *) AC_MSG_ERROR([invalid bignum implementation]) ;; esac #select scalar implementation case $set_scalar in 64bit) AC_DEFINE(USE_SCALAR_4X64, 1, [Define this symbol to use the 4x64 scalar implementation]) ;; 32bit) AC_DEFINE(USE_SCALAR_8X32, 1, [Define this symbol to use 
the 8x32 scalar implementation]) ;; *) AC_MSG_ERROR([invalid scalar implementation]) ;; esac +#set ecmult window size +if test x"$req_ecmult_window" = x"auto"; then + set_ecmult_window=15 +else + set_ecmult_window=$req_ecmult_window +fi + +error_window_size=['window size for ecmult precomputation not an integer in range [2..24] or "auto"'] +case $set_ecmult_window in +''|*[[!0-9]]*) + # no valid integer + AC_MSG_ERROR($error_window_size) + ;; +*) + if test "$set_ecmult_window" -lt 2 -o "$set_ecmult_window" -gt 24 ; then + # not in range + AC_MSG_ERROR($error_window_size) + fi + AC_DEFINE_UNQUOTED(ECMULT_WINDOW_SIZE, $set_ecmult_window, [Set window size for ecmult precomputation]) + ;; +esac + if test x"$use_tests" = x"yes"; then SECP_OPENSSL_CHECK if test x"$has_openssl_ec" = x"yes"; then if test x"$enable_openssl_tests" != x"no"; then AC_DEFINE(ENABLE_OPENSSL_TESTS, 1, [Define this symbol if OpenSSL EC functions are available]) SECP_TEST_INCLUDES="$SSL_CFLAGS $CRYPTO_CFLAGS" SECP_TEST_LIBS="$CRYPTO_LIBS" case $host in *mingw*) SECP_TEST_LIBS="$SECP_TEST_LIBS -lgdi32" ;; esac fi else if test x"$enable_openssl_tests" = x"yes"; then AC_MSG_ERROR([OpenSSL tests requested but OpenSSL with EC support is not available]) fi fi else if test x"$enable_openssl_tests" = x"yes"; then AC_MSG_ERROR([OpenSSL tests requested but tests are not enabled]) fi fi if test x"$use_jni" != x"no"; then AX_JNI_INCLUDE_DIR have_jni_dependencies=yes if test x"$enable_module_ecdh" = x"no"; then have_jni_dependencies=no fi if test "x$JNI_INCLUDE_DIRS" = "x"; then have_jni_dependencies=no fi if test "x$have_jni_dependencies" = "xno"; then if test x"$use_jni" = x"yes"; then AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and try again.]) fi AC_MSG_WARN([jni headers/dependencies not found. 
jni support disabled]) use_jni=no else use_jni=yes for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS; do JNI_INCLUDES="$JNI_INCLUDES -I$JNI_INCLUDE_DIR" done fi fi if test x"$set_bignum" = x"gmp"; then SECP_LIBS="$SECP_LIBS $GMP_LIBS" SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS" fi if test x"$use_endomorphism" = x"yes"; then AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization]) fi if test x"$set_precomp" = x"yes"; then AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table]) fi if test x"$enable_module_ecdh" = x"yes"; then AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module]) fi if test x"$enable_module_multiset" = x"yes"; then AC_DEFINE(ENABLE_MODULE_MULTISET, 1, [Define this symbol to enable the multiset module]) fi if test x"$enable_module_recovery" = x"yes"; then AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module]) fi if test x"$enable_module_schnorr" = x"yes"; then AC_DEFINE(ENABLE_MODULE_SCHNORR, 1, [Define this symbol to enable the Schnorr signature module]) fi AC_C_BIGENDIAN() if test x"$use_external_asm" = x"yes"; then AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used]) fi if test x"$enable_experimental" = x"yes"; then AC_MSG_NOTICE([******]) AC_MSG_NOTICE([WARNING: experimental build]) AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.]) AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) AC_MSG_NOTICE([******]) else if test x"$enable_module_ecdh" = x"yes"; then AC_MSG_ERROR([ECDH module is experimental. Use --enable-experimental to allow.]) fi if test x"$set_asm" = x"arm"; then AC_MSG_ERROR([ARM assembly optimization is experimental. 
Use --enable-experimental to allow.]) fi fi AC_CONFIG_HEADERS([src/libsecp256k1-config.h]) AC_CONFIG_FILES([Makefile libsecp256k1.pc]) AC_SUBST(JNI_INCLUDES) AC_SUBST(SECP_INCLUDES) AC_SUBST(SECP_LIBS) AC_SUBST(SECP_TEST_LIBS) AC_SUBST(SECP_TEST_INCLUDES) AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"]) AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"]) AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"]) AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_MULTISET], [test x"$enable_module_multiset" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_SCHNORR], [test x"$enable_module_schnorr" = x"yes"]) AM_CONDITIONAL([USE_JNI], [test x"$use_jni" = x"yes"]) AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"]) AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"]) dnl make sure nothing new is exported so that we don't break the cache PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH" unset PKG_CONFIG_PATH PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP" AC_OUTPUT echo echo "Build Options:" echo " with endomorphism = $use_endomorphism" echo " with ecmult precomp = $set_precomp" echo " with jni = $use_jni" echo " with benchmarks = $use_benchmark" echo " with coverage = $enable_coverage" echo " module ecdh = $enable_module_ecdh" echo " module recovery = $enable_module_recovery" echo " module multiset = $enable_module_multiset" echo " module schnorr = $enable_module_schnorr" echo echo " asm = $set_asm" echo " bignum = $set_bignum" echo " field = $set_field" echo " scalar = $set_scalar" +echo " ecmult window size = $set_ecmult_window" echo echo " CC = $CC" echo " CFLAGS = $CFLAGS" echo " CPPFLAGS = $CPPFLAGS" echo " LDFLAGS = $LDFLAGS" echo diff --git a/src/secp256k1/src/basic-config.h b/src/secp256k1/src/basic-config.h index 8a4ade631..840cddc76 100644 --- a/src/secp256k1/src/basic-config.h +++ b/src/secp256k1/src/basic-config.h @@ -1,34 +1,35 @@ /********************************************************************** * Copyright (c) 2013, 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_BASIC_CONFIG_H #define SECP256K1_BASIC_CONFIG_H #ifdef USE_BASIC_CONFIG #undef USE_ASM_X86_64 #undef USE_ECMULT_STATIC_PRECOMPUTATION #undef USE_ENDOMORPHISM #undef USE_FIELD_10X26 #undef USE_FIELD_5X52 #undef USE_FIELD_INV_BUILTIN #undef USE_FIELD_INV_NUM #undef USE_NUM_GMP #undef USE_NUM_NONE #undef USE_SCALAR_4X64 #undef USE_SCALAR_8X32 #undef USE_SCALAR_INV_BUILTIN #undef USE_SCALAR_INV_NUM #define USE_NUM_NONE 1 #define USE_FIELD_INV_BUILTIN 1 #define USE_SCALAR_INV_BUILTIN 1 #define USE_FIELD_10X26 1 #define USE_SCALAR_8X32 1 +#define ECMULT_WINDOW_SIZE 15 #endif /* USE_BASIC_CONFIG */ #endif /* SECP256K1_BASIC_CONFIG_H */ diff --git a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h index 4eadf5f52..cae8430a4 100644 --- a/src/secp256k1/src/ecmult_impl.h +++ b/src/secp256k1/src/ecmult_impl.h @@ -1,1183 +1,1207 @@ /***************************************************************************** * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, 
Jonas Nick * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php. * *****************************************************************************/ #ifndef SECP256K1_ECMULT_IMPL_H #define SECP256K1_ECMULT_IMPL_H #include #include #include "group.h" #include "scalar.h" #include "ecmult.h" #if defined(EXHAUSTIVE_TEST_ORDER) /* We need to lower these values for exhaustive tests because * the tables cannot have infinities in them (this breaks the * affine-isomorphism stuff which tracks z-ratios) */ # if EXHAUSTIVE_TEST_ORDER > 128 # define WINDOW_A 5 # define WINDOW_G 8 # elif EXHAUSTIVE_TEST_ORDER > 8 # define WINDOW_A 4 # define WINDOW_G 4 # else # define WINDOW_A 2 # define WINDOW_G 2 # endif #else /* optimal for 128-bit and 256-bit exponents. */ -#define WINDOW_A 5 -/** larger numbers may result in slightly better performance, at the cost of - exponentially larger precomputed tables. */ -#ifdef USE_ENDOMORPHISM -/** Two tables for window size 15: 1.375 MiB. */ -#define WINDOW_G 15 -#else -/** One table for window size 16: 1.375 MiB. */ -#define WINDOW_G 16 +# define WINDOW_A 5 +/** Larger values for ECMULT_WINDOW_SIZE result in possibly better + * performance at the cost of an exponentially larger precomputed + * table. The exact table size is + * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes, + * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can + * be larger due to platform-specific padding and alignment. + * For example, the default window size of 15 gives a table of + * 2^13 = 8192 entries, i.e. 512 KiB at 64 bytes per entry. + * If the endomorphism optimization is enabled (USE_ENDOMORPHISM) + * two tables of this size are used instead of only one. + */ +# define WINDOW_G ECMULT_WINDOW_SIZE #endif + +/* No one will ever need more than a window size of 24. The code might + * be correct for larger values of ECMULT_WINDOW_SIZE but this is not + * tested. + * + * The following limitations are known, and there are probably more: + * If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect + * because the size of the memory object that we allocate (in bytes) + * will not fit in a size_t. + * If WINDOW_G > 31 and int has 32 bits, then the code is incorrect + * because certain expressions will overflow. + */ +#if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24 +# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24]. #endif #ifdef USE_ENDOMORPHISM #define WNAF_BITS 128 #else #define WNAF_BITS 256 #endif #define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w)) #define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w) /** The number of entries a table with precomputed multiples needs to have. */ #define ECMULT_TABLE_SIZE(w) (1 << ((w)-2)) /* The number of objects allocated on the scratch space for ecmult_multi algorithms */ #define PIPPENGER_SCRATCH_OBJECTS 6 #define STRAUSS_SCRATCH_OBJECTS 6 #define PIPPENGER_MAX_BUCKET_WINDOW 12 /* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */ #ifdef USE_ENDOMORPHISM #define ECMULT_PIPPENGER_THRESHOLD 88 #else #define ECMULT_PIPPENGER_THRESHOLD 160 #endif #ifdef USE_ENDOMORPHISM #define ECMULT_MAX_POINTS_PER_BATCH 5000000 #else #define ECMULT_MAX_POINTS_PER_BATCH 10000000 #endif /** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain * the values [1*a,3*a,...,(2*n-1)*a], so it space for n values. zr[0] will * contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z. * Prej's Z values are undefined, except for the last value. 
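 * For example, with n = 4 the table will hold [1*a, 3*a, 5*a, 7*a].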
*/ static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) { secp256k1_gej d; secp256k1_ge a_ge, d_ge; int i; VERIFY_CHECK(!a->infinity); secp256k1_gej_double_var(&d, a, NULL); /* * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate * of 'd', and scale the 1P starting value's x/y coordinates without changing its z. */ d_ge.x = d.x; d_ge.y = d.y; d_ge.infinity = 0; secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z); prej[0].x = a_ge.x; prej[0].y = a_ge.y; prej[0].z = a->z; prej[0].infinity = 0; zr[0] = d.z; for (i = 1; i < n; i++) { secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); } /* * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only * the final point's z coordinate is actually used though, so just update that. */ secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); } /** Fill a table 'pre' with precomputed odd multiples of a. * * There are two versions of this function: * - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its * resulting point set to a single constant Z denominator, stores the X and Y * coordinates as ge_storage points in pre, and stores the global Z in rz. * It only operates on tables sized for WINDOW_A wnaf multiples. * - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its * resulting point set to actually affine points, and stores those in pre. * It operates on tables of any size, but uses heap-allocated temporaries. * * To compute a*P + b*G, we compute a table for P using the first function, * and for G using the second (which requires an inverse, but it only needs to * happen once). */ static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) { secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; /* Compute the odd multiples in Jacobian form. */ secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); /* Bring them to the same Z denominator. */ secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); } static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) { secp256k1_gej d; secp256k1_ge d_ge, p_ge; secp256k1_gej pj; secp256k1_fe zi; secp256k1_fe zr; secp256k1_fe dx_over_dz_squared; int i; VERIFY_CHECK(!a->infinity); secp256k1_gej_double_var(&d, a, NULL); /* First, we perform all the additions in an isomorphic curve obtained by multiplying * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use * `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store * the resulting y-coordinate and the z-ratio, since we only have enough memory to * store two field elements. These are sufficient to efficiently undo the isomorphism * and recompute all the `x`s. 
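 * Concretely, the first loop below stores the y-coordinate in pre[i].y and
 * the z-ratio in pre[i].x; the second pass further down converts both back
 * into true affine coordinates.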
*/ d_ge.x = d.x; d_ge.y = d.y; d_ge.infinity = 0; secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z); pj.x = p_ge.x; pj.y = p_ge.y; pj.z = a->z; pj.infinity = 0; for (i = 0; i < (n - 1); i++) { secp256k1_fe_normalize_var(&pj.y); secp256k1_fe_to_storage(&pre[i].y, &pj.y); secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr); secp256k1_fe_normalize_var(&zr); secp256k1_fe_to_storage(&pre[i].x, &zr); } /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */ secp256k1_fe_mul(&zi, &pj.z, &d.z); secp256k1_fe_inv_var(&zi, &zi); /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so * that we can combine it with the saved z-ratios to compute the other zs * without any more inversions. */ secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi); secp256k1_ge_to_storage(&pre[n - 1], &p_ge); /* Compute the actual x-coordinate of D, which will be needed below. */ secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ secp256k1_fe_sqr(&dx_over_dz_squared, &d.z); secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); /* Going into the second loop, we have set `pre[n-1]` to its final affine * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We * have `zi = (p.z * d.z)^-1`, where * * `p.z` is the z-coordinate of the point on the isomorphic curve * which was ultimately assigned to `pre[n-1]`. * `d.z` is the multiplier that must be applied to all z-coordinates * to move from our isomorphic curve back to secp256k1; so the * product `p.z * d.z` is the z-coordinate of the secp256k1 * point assigned to `pre[n-1]`. * * All subsequent inverse-z-coordinates can be obtained by multiplying this * factor by successive z-ratios, which is much more efficient than directly * computing each one. * * Importantly, these inverse-zs will be coordinates of points on secp256k1, * while our other stored values come from computations on the isomorphic * curve. So in the below loop, we will take care not to actually use `zi` * or any derived values until we're back on secp256k1. */ i = n - 1; while (i > 0) { secp256k1_fe zi2, zi3; const secp256k1_fe *rzr; i--; secp256k1_ge_from_storage(&p_ge, &pre[i]); /* For each remaining point, we extract the z-ratio from the stored * x-coordinate, compute its z^-1 from that, and compute the full * point from that. */ rzr = &p_ge.x; secp256k1_fe_mul(&zi, &zi, rzr); secp256k1_fe_sqr(&zi2, &zi); secp256k1_fe_mul(&zi3, &zi2, &zi); /* To compute the actual x-coordinate, we use the stored z ratio and * y-coordinate, which we obtained from `secp256k1_gej_add_ge_var` * in the loop above, as well as the inverse of the square of its * z-coordinate. We store the latter in the `zi2` variable, which is * computed iteratively starting from the overall Z inverse then * multiplying by each z-ratio in turn. * * Denoting the z-ratio as `rzr`, we observe that it is equal to `h` * from the inside of the above `gej_add_ge_var` call. This satisfies * * rzr = d_x * z^2 - x * d_z^2 * * where (`d_x`, `d_z`) are Jacobian coordinates of `D` and `(x, z)` * are Jacobian coordinates of our desired point -- except both are on * the isomorphic curve that we were using when we called `gej_add_ge_var`. * To get back to secp256k1, we must multiply both `z`s by `d_z`, or * equivalently divide both `x`s by `d_z^2`. Our equation then becomes * * rzr = d_x * z^2 / d_z^2 - x * * (The left-hand-side, being a ratio of z-coordinates, is unaffected * by the isomorphism.) 
* * Rearranging to solve for `x`, we have * * x = d_x * z^2 / d_z^2 - rzr * * But what we actually want is the affine coordinate `X = x/z^2`, * which will satisfy * * X = d_x / d_z^2 - rzr / z^2 * = dx_over_dz_squared - rzr * zi2 */ secp256k1_fe_mul(&p_ge.x, rzr, &zi2); secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1); secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared); /* y is stored_y/z^3, as we expect */ secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3); /* Store */ secp256k1_ge_to_storage(&pre[i], &p_ge); } } /** The following two macro retrieves a particular odd multiple from a table * of precomputed multiples. */ #define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ if ((n) > 0) { \ *(r) = (pre)[((n)-1)/2]; \ } else { \ *(r) = (pre)[(-(n)-1)/2]; \ secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) #define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ if ((n) > 0) { \ secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ } else { \ secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { ctx->pre_g = NULL; #ifdef USE_ENDOMORPHISM ctx->pre_g_128 = NULL; #endif } static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) { secp256k1_gej gj; if (ctx->pre_g != NULL) { return; } /* get the generator */ secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); - ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); + { + size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); + /* check for overflow */ + VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); + ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); + } /* precompute the tables with odd multiples */ secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); #ifdef USE_ENDOMORPHISM { secp256k1_gej g_128j; int i; - ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); + size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G)); + /* check for overflow */ + VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); + ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); /* calculate 2^128*generator */ g_128j = gj; for (i = 0; i < 128; i++) { secp256k1_gej_double_var(&g_128j, &g_128j, NULL); } secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); } #endif } static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src, const secp256k1_callback *cb) { if (src->pre_g == NULL) { dst->pre_g = NULL; } else { - size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); + size_t size = sizeof((*dst->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); memcpy(dst->pre_g, src->pre_g, size); } #ifdef USE_ENDOMORPHISM if (src->pre_g_128 == NULL) { dst->pre_g_128 = NULL; } else { - size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); + size_t 
size = sizeof((*dst->pre_g_128)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); memcpy(dst->pre_g_128, src->pre_g_128, size); } #endif } static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) { return ctx->pre_g != NULL; } static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) { free(ctx->pre_g); #ifdef USE_ENDOMORPHISM free(ctx->pre_g_128); #endif secp256k1_ecmult_context_init(ctx); } /** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits), * with the following guarantees: * - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1) * - two non-zero entries in wnaf are separated by at least w-1 zeroes. * - the number of set values in wnaf is returned. This number is at most 256, and at most one more * than the number of bits in the (absolute value) of the input. */ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) { secp256k1_scalar s = *a; int last_set_bit = -1; int bit = 0; int sign = 1; int carry = 0; VERIFY_CHECK(wnaf != NULL); VERIFY_CHECK(0 <= len && len <= 256); VERIFY_CHECK(a != NULL); VERIFY_CHECK(2 <= w && w <= 31); memset(wnaf, 0, len * sizeof(wnaf[0])); if (secp256k1_scalar_get_bits(&s, 255, 1)) { secp256k1_scalar_negate(&s, &s); sign = -1; } while (bit < len) { int now; int word; if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { bit++; continue; } now = w; if (now > len - bit) { now = len - bit; } word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry; carry = (word >> (w-1)) & 1; word -= carry << w; wnaf[bit] = sign * word; last_set_bit = bit; bit += now; } #ifdef VERIFY CHECK(carry == 0); while (bit < 256) { CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0); } #endif return last_set_bit + 1; } struct secp256k1_strauss_point_state { #ifdef USE_ENDOMORPHISM secp256k1_scalar na_1, na_lam; int wnaf_na_1[130]; int wnaf_na_lam[130]; int bits_na_1; int bits_na_lam; #else int wnaf_na[256]; int bits_na; #endif size_t input_pos; }; struct secp256k1_strauss_state { secp256k1_gej* prej; secp256k1_fe* zr; secp256k1_ge* pre_a; #ifdef USE_ENDOMORPHISM secp256k1_ge* pre_a_lam; #endif struct secp256k1_strauss_point_state* ps; }; static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { secp256k1_ge tmpa; secp256k1_fe Z; #ifdef USE_ENDOMORPHISM /* Splitted G factors. */ secp256k1_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; int bits_ng_128 = 0; #else int wnaf_ng[256]; int bits_ng = 0; #endif int i; int bits = 0; int np; int no = 0; for (np = 0; np < num; ++np) { if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) { continue; } state->ps[no].input_pos = np; #ifdef USE_ENDOMORPHISM /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. 
*/ state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A); state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A); VERIFY_CHECK(state->ps[no].bits_na_1 <= 130); VERIFY_CHECK(state->ps[no].bits_na_lam <= 130); if (state->ps[no].bits_na_1 > bits) { bits = state->ps[no].bits_na_1; } if (state->ps[no].bits_na_lam > bits) { bits = state->ps[no].bits_na_lam; } #else /* build wnaf representation for na. */ state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A); if (state->ps[no].bits_na > bits) { bits = state->ps[no].bits_na; } #endif ++no; } /* Calculate odd multiples of a. * All multiples are brought to the same Z 'denominator', which is stored * in Z. Due to secp256k1' isomorphism we can do all operations pretending * that the Z coordinate was 1, use affine addition formulae, and correct * the Z coordinate of the result once at the end. * The exception is the precomputed G table points, which are actually * affine. Compared to the base used for other points, they have a Z ratio * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same * isomorphism to efficiently add with a known Z inverse. */ if (no > 0) { /* Compute the odd multiples in Jacobian form. */ secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); for (np = 1; np < no; ++np) { secp256k1_gej tmp = a[state->ps[np].input_pos]; #ifdef VERIFY secp256k1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); #endif secp256k1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); secp256k1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); } /* Bring them to the same Z denominator. 
*/ secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); } else { secp256k1_fe_set_int(&Z, 1); } #ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); } } if (ng) { /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ secp256k1_scalar_split_128(&ng_1, &ng_128, ng); /* Build wnaf representation for ng_1 and ng_128 */ bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); if (bits_ng_1 > bits) { bits = bits_ng_1; } if (bits_ng_128 > bits) { bits = bits_ng_128; } } #else if (ng) { bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); if (bits_ng > bits) { bits = bits_ng; } } #endif secp256k1_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; secp256k1_gej_double_var(r, r, NULL); #ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); } if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); } if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); } #else for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng && (n = wnaf_ng[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); } #endif } if (!r->infinity) { secp256k1_fe_mul(&r->z, &r->z, &Z); } } static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; struct secp256k1_strauss_point_state ps[1]; #ifdef USE_ENDOMORPHISM secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; #endif struct secp256k1_strauss_state state; state.prej = prej; state.zr = zr; state.pre_a = pre_a; #ifdef USE_ENDOMORPHISM state.pre_a_lam = pre_a_lam; #endif state.ps = ps; secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); } static size_t secp256k1_strauss_scratch_size(size_t n_points) { #ifdef USE_ENDOMORPHISM static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar); #else static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + 
sizeof(secp256k1_scalar); #endif return n_points*point_size; } static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { secp256k1_gej* points; secp256k1_scalar* scalars; struct secp256k1_strauss_state state; size_t i; secp256k1_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) { return 0; } points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej)); scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar)); state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej)); state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe)); #ifdef USE_ENDOMORPHISM state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A); #else state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); #endif state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state)); for (i = 0; i < n_points; i++) { secp256k1_ge point; if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) { secp256k1_scratch_deallocate_frame(scratch); return 0; } secp256k1_gej_set_ge(&points[i], &point); } secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc); secp256k1_scratch_deallocate_frame(scratch); return 1; } /* Wrapper for secp256k1_ecmult_multi_func interface */ static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); } static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) { return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1); } /** Convert a number to WNAF notation. * The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val. * It has the following guarantees: * - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w) * - the number of words set is always WNAF_SIZE(w) * - the returned skew is 0 or 1 */ static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) { int skew = 0; int pos; int max_pos; int last_w; const secp256k1_scalar *work = s; if (secp256k1_scalar_is_zero(s)) { for (pos = 0; pos < WNAF_SIZE(w); pos++) { wnaf[pos] = 0; } return 0; } if (secp256k1_scalar_is_even(s)) { skew = 1; } wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew; /* Compute last window size. Relevant when window size doesn't divide the * number of bits in the scalar */ last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w; /* Store the position of the first nonzero word in max_pos to allow * skipping leading zeros when calculating the wnaf. 
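 * The scan below also zeroes those leading words, so the main loop
 * afterwards only needs to run up to max_pos.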
*/ for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) { int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if(val != 0) { break; } wnaf[pos] = 0; } max_pos = pos; pos = 1; while (pos <= max_pos) { int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if ((val & 1) == 0) { wnaf[pos - 1] -= (1 << w); wnaf[pos] = (val + 1); } else { wnaf[pos] = val; } /* Set a coefficient to zero if it is 1 or -1 and the proceeding digit * is strictly negative or strictly positive respectively. Only change * coefficients at previous positions because above code assumes that * wnaf[pos - 1] is odd. */ if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) { if (wnaf[pos - 1] == 1) { wnaf[pos - 2] += 1 << w; } else { wnaf[pos - 2] -= 1 << w; } wnaf[pos - 1] = 0; } ++pos; } return skew; } struct secp256k1_pippenger_point_state { int skew_na; size_t input_pos; }; struct secp256k1_pippenger_state { int *wnaf_na; struct secp256k1_pippenger_point_state* ps; }; /* * pippenger_wnaf computes the result of a multi-point multiplication as * follows: The scalars are brought into wnaf with n_wnaf elements each. Then * for every i < n_wnaf, first each point is added to a "bucket" corresponding * to the point's wnaf[i]. Second, the buckets are added together such that * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ... */ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) { size_t n_wnaf = WNAF_SIZE(bucket_window+1); size_t np; size_t no = 0; int i; int j; for (np = 0; np < num; ++np) { if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) { continue; } state->ps[no].input_pos = np; state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); no++; } secp256k1_gej_set_infinity(r); if (no == 0) { return 1; } for (i = n_wnaf - 1; i >= 0; i--) { secp256k1_gej running_sum; for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) { secp256k1_gej_set_infinity(&buckets[j]); } for (np = 0; np < no; ++np) { int n = state->wnaf_na[np*n_wnaf + i]; struct secp256k1_pippenger_point_state point_state = state->ps[np]; secp256k1_ge tmp; int idx; if (i == 0) { /* correct for wnaf skew */ int skew = point_state.skew_na; if (skew) { secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]); secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); } } if (n > 0) { idx = (n - 1)/2; secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); } else if (n < 0) { idx = -(n + 1)/2; secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]); secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); } } for(j = 0; j < bucket_window; j++) { secp256k1_gej_double_var(r, r, NULL); } secp256k1_gej_set_infinity(&running_sum); /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ... * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ... * + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...) * using an intermediate running sum: * running_sum = bucket[0] + bucket[1] + bucket[2] + ... * * The doubling is done implicitly by deferring the final window doubling (of 'r'). 
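 * For example, with three buckets B0, B1, B2 and r starting from zero for
 * this window, the loop adds B2 and then B1 to running_sum, leaving
 * r = 2*B2 + B1; adding B0 to running_sum, doubling r and adding running_sum
 * then gives r = B0 + 3*B1 + 5*B2, as intended.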
         */
        for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
            secp256k1_gej_add_var(r, r, &running_sum, NULL);
        }

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
    }
    return 1;
}

/**
 * Returns optimal bucket_window (number of bits of a scalar represented by a
 * set of buckets) for a given number of points.
 */
static int secp256k1_pippenger_bucket_window(size_t n) {
#ifdef USE_ENDOMORPHISM
    if (n <= 1) {
        return 1;
    } else if (n <= 4) {
        return 2;
    } else if (n <= 20) {
        return 3;
    } else if (n <= 57) {
        return 4;
    } else if (n <= 136) {
        return 5;
    } else if (n <= 235) {
        return 6;
    } else if (n <= 1260) {
        return 7;
    } else if (n <= 4420) {
        return 9;
    } else if (n <= 7880) {
        return 10;
    } else if (n <= 16050) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
#else
    if (n <= 1) {
        return 1;
    } else if (n <= 11) {
        return 2;
    } else if (n <= 45) {
        return 3;
    } else if (n <= 100) {
        return 4;
    } else if (n <= 275) {
        return 5;
    } else if (n <= 625) {
        return 6;
    } else if (n <= 1850) {
        return 7;
    } else if (n <= 3400) {
        return 8;
    } else if (n <= 9630) {
        return 9;
    } else if (n <= 17900) {
        return 10;
    } else if (n <= 32800) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
#endif
}

/**
 * Returns the maximum optimal number of points for a bucket_window.
 */
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
#ifdef USE_ENDOMORPHISM
        case 1: return 1;
        case 2: return 4;
        case 3: return 20;
        case 4: return 57;
        case 5: return 136;
        case 6: return 235;
        case 7: return 1260;
        case 8: return 1260;
        case 9: return 4420;
        case 10: return 7880;
        case 11: return 16050;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#else
        case 1: return 1;
        case 2: return 11;
        case 3: return 45;
        case 4: return 100;
        case 5: return 275;
        case 6: return 625;
        case 7: return 1850;
        case 8: return 3400;
        case 9: return 9630;
        case 10: return 17900;
        case 11: return 32800;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#endif
    }
    return 0;
}

#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
    secp256k1_scalar tmp = *s1;
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
    }
}
#endif

/**
 * Returns the scratch size required for a given number of points (excluding
 * base point G) without considering alignment.
 */
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
#ifdef USE_ENDOMORPHISM
    size_t entries = 2*n_points + 2;
#else
    size_t entries = n_points + 1;
#endif
    size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
    return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}

static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
     * sizes.
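     * The factor 2 comes from secp256k1_ecmult_endo_split above, which splits
     * every (scalar, point) pair into two pairs via the lambda endomorphism.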
     * The reason for +1 is that we add the G scalar to the list of
     * other scalars. */
#ifdef USE_ENDOMORPHISM
    size_t entries = 2*n_points + 2;
#else
    size_t entries = n_points + 1;
#endif
    secp256k1_ge *points;
    secp256k1_scalar *scalars;
    secp256k1_gej *buckets;
    struct secp256k1_pippenger_state *state_space;
    size_t idx = 0;
    size_t point_idx = 0;
    int i, j;
    int bucket_window;

    (void)ctx;
    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    bucket_window = secp256k1_pippenger_bucket_window(n_points);
    if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
        return 0;
    }
    points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, sizeof(*buckets) << bucket_window);

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;
        idx++;
#ifdef USE_ENDOMORPHISM
        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
        idx++;
#endif
    }

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_deallocate_frame(scratch);
            return 0;
        }
        idx++;
#ifdef USE_ENDOMORPHISM
        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        idx++;
#endif
        point_idx++;
    }

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

    /* Clear data */
    for(i = 0; (size_t)i < idx; i++) {
        secp256k1_scalar_clear(&scalars[i]);
        state_space->ps[i].skew_na = 0;
        for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
            state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
        }
    }
    for(i = 0; i < 1<<bucket_window; i++) {
        secp256k1_gej_clear(&buckets[i]);
    }

    secp256k1_scratch_deallocate_frame(scratch);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_pippenger_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

/**
 * Returns the maximum number of points in addition to G that can be used with
 * a given scratch space.
 */
static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
    size_t max_alloc = secp256k1_scratch_max_allocation(scratch, PIPPENGER_SCRATCH_OBJECTS);
    int bucket_window;
    size_t res = 0;

    for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
        size_t n_points;
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);

#ifdef USE_ENDOMORPHISM
        entry_size = 2*entry_size;
#endif
        space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
        if (space_overhead > max_alloc) {
            break;
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            res = n_points;
        }
        if (n_points < max_points) {
            /* A larger bucket_window may support even more points. But if we
             * would choose that then the caller couldn't safely use any number
             * smaller than what this function returns */
            break;
        }
    }
    return res;
}

/* Computes ecmult_multi by simply multiplying and adding each point.
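 * Each scalar_i*point_i term is evaluated with a full secp256k1_ecmult call.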
 * Does not require a scratch space */
static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
    size_t point_idx;
    secp256k1_scalar szero;
    secp256k1_gej tmpj;

    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        secp256k1_ge point;
        secp256k1_gej pointj;
        secp256k1_scalar scalar;
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
    }
    return 1;
}

/* Compute the number of batches and the batch size given the maximum batch size and the
 * total number of points */
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
    *n_batches = 1 + (n - 1) / max_n_batch_points;
    *n_batch_points = 1 + (n - 1) / *n_batches;
    return 1;
}
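/* For example, secp256k1_ecmult_multi_batch_size_helper with
 * max_n_batch_points = 5 and n = 16 yields n_batches = 4 and
 * n_batch_points = 4 (four balanced batches rather than 5+5+5+1), while
 * n = 19 yields n_batches = 4 and n_batch_points = 5. */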
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);

static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    size_t i;

    int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
    size_t n_batches;
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        return 1;
    } else if (n == 0) {
        secp256k1_scalar szero;
        secp256k1_scalar_set_int(&szero, 0);
        secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc);
        return 1;
    }
    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
    }

    /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If the batch size
     * is greater than a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
     * As a first step check if there's enough space for Pippenger's algo (which requires less space
     * than Strauss' algo) and if not, use the simple algorithm. */
    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
    }
    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = secp256k1_ecmult_pippenger_batch;
    } else {
        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(scratch), n)) {
            return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
        }
        f = secp256k1_ecmult_strauss_batch;
    }
    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        secp256k1_gej tmp;
        if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
        n -= nbp;
    }
    return 1;
}

#endif /* SECP256K1_ECMULT_IMPL_H */