diff --git a/contrib/teamcity/build-configurations.yml b/contrib/teamcity/build-configurations.yml
index cc791e0d5..c356c3e93 100644
--- a/contrib/teamcity/build-configurations.yml
+++ b/contrib/teamcity/build-configurations.yml
@@ -1,435 +1,428 @@
 ---
 # Templates can be referenced in builds to avoid duplication
 templates:
   gitian_builds:
     script: contrib/teamcity/gitian.sh
     timeout: 7200
     artifacts:
       gitian-results: ''

   check+secp256k1:
     targets:
       - - all
         - install
         - install-secp256k1
       - - check
         - check-secp256k1
         - check-functional

   secp256k1:
     Werror: true
     targets:
       - - secp256k1
         - install-secp256k1
       - - check-secp256k1

 # The build descriptions.
 # If a script is defined, then this will be the only step to run.
 # Otherwise a list of targets can be specified, grouped by parallel runs.
 # Example:
 #   targets:
 #     - - build11
 #       - build12
 #     - - build21
 #       - build22
 # Will run:
 #   ninja build11 build12
 #   ninja build21 build22
 builds:
   build-asan:
     Werror: true
     clang: true
     fail_fast: true
     cmake_flags:
       - '-DCMAKE_CXX_FLAGS=-DARENA_DEBUG'
       - '-DCRYPTO_USE_ASM=OFF'
       - '-DENABLE_SANITIZERS=address'
     templates:
       - check+secp256k1
     timeout: 2400
     env:
       ASAN_OPTIONS: log_path=stdout
       LSAN_OPTIONS: log_path=stdout

   build-bench:
     Werror: true
     cmake_flags:
       - '-DSECP256K1_ENABLE_MODULE_ECDH=ON'
       - '-DSECP256K1_ENABLE_MODULE_MULTISET=ON'
     targets:
       - - all
         - install-bitcoin-bench
         - install-secp256k1-bench
       - - bench-bitcoin
       - - bench-secp256k1
     timeout: 1200

   build-clang:
     runOnDiff: true
     Werror: true
     clang: true
     targets:
       - - all
         - install
         - install-secp256k1
       - - check
         - check-secp256k1
     timeout: 1200

   build-clang-tidy:
     runOnDiff: true
     clang: true
     cmake_flags:
       - '-DENABLE_CLANG_TIDY=ON'
     targets:
       - - all
       - - check
     timeout: 1800

   build-coverage:
     gcc: true
     cmake_flags:
       - '-DENABLE_COVERAGE=ON'
       - '-DENABLE_BRANCH_COVERAGE=ON'
     targets:
       - - coverage-check-extended
     post_build: |
       "${TOPLEVEL}/contrib/teamcity/upload-coverage.sh" check-extended
     timeout: 4800
     artifacts:
       coverage.tar.gz: coverage.tar.gz

   build-debug:
     runOnDiff: true
     Werror: true
     cmake_flags:
       - '-DCMAKE_BUILD_TYPE=Debug'
     templates:
       - check+secp256k1
     timeout: 1200

   build-diff:
     runOnDiff: true
     Werror: true
     targets:
       - - all
         - install
         - install-secp256k1
       - - check-all
         - check-upgrade-activated
     timeout: 1200

   build-docs:
     targets:
       - - doc-rpc
     post_build: |
       xvfb-run -a -e /dev/stderr ninja install-manpages-html
     timeout: 600
     artifacts:
       doc/*: doc

   build-ibd:
     targets:
       - - bitcoind
     post_build: |
       "${TOPLEVEL}/contrib/teamcity/ibd.sh" -disablewallet -debug=net
     timeout: 14400
     artifacts:
       ibd/debug.log: log/debug.log

   build-ibd-no-assumevalid-checkpoint:
     targets:
       - - bitcoind
     post_build: |
       "${TOPLEVEL}/contrib/teamcity/ibd.sh" -disablewallet -assumevalid=0 -checkpoints=0 -debug=net
     timeout: 21600
     artifacts:
       ibd/debug.log: log/debug.log

   build-linux32:
     cross_build:
       static_depends: linux32
       toolchain: Linux32
     templates:
       - check+secp256k1
     timeout: 3600

   build-linux64:
     cross_build:
       static_depends: linux64
       toolchain: Linux64
     templates:
       - check+secp256k1
     timeout: 3600

   build-linux-aarch64:
     cross_build:
       static_depends: linux-aarch64
       toolchain: LinuxAArch64
       emulator: qemu-aarch64-static
     cmake_flags:
       # The ZMQ functional test will fail with qemu (due to a qemu limitation),
       # so disable it to avoid the failure.
       # Extracted from stderr:
       #   Unknown host QEMU_IFLA type: 50
       #   Unknown host QEMU_IFLA type: 51
       #   Unknown QEMU_IFLA_BRPORT type 33
       - "-DBUILD_BITCOIN_ZMQ=OFF"
       # This is a horrible hack to work around a qemu bug:
       # https://bugs.launchpad.net/qemu/+bug/1748612
       # Qemu emits a message for unsupported features called by the guest.
       # Because the output filtering is not working at all, it causes the
       # qemu stderr to end up in the node stderr and fail the functional
       # tests.
       # Disabling the unsupported feature (here bypassing the config
       # detection) fixes the issue.
       # FIXME: get rid of the hack, either by using a better qemu version
       # or by filtering stderr at the framework level.
       - "-DHAVE_DECL_GETIFADDRS=OFF"
     templates:
       - check+secp256k1
     timeout: 3600
     env:
       QEMU_LD_PREFIX: /usr/aarch64-linux-gnu

   build-linux-arm:
     cross_build:
       static_depends: linux-arm
       toolchain: LinuxARM
       emulator: qemu-arm-static
     cmake_flags:
       # The ZMQ functional test will fail with qemu (due to a qemu limitation),
       # so disable it to avoid the failure.
       # Extracted from stderr:
       #   Unknown host QEMU_IFLA type: 50
       #   Unknown host QEMU_IFLA type: 51
       #   Unknown QEMU_IFLA_BRPORT type 33
       - "-DBUILD_BITCOIN_ZMQ=OFF"
       # This is a horrible hack to work around a qemu bug:
       # https://bugs.launchpad.net/qemu/+bug/1748612
       # Qemu emits a message for unsupported features called by the guest.
       # Because the output filtering is not working at all, it causes the
       # qemu stderr to end up in the node stderr and fail the functional
       # tests.
       # Disabling the unsupported feature (here bypassing the config
       # detection) fixes the issue.
       # FIXME: get rid of the hack, either by using a better qemu version
       # or by filtering stderr at the framework level.
       - "-DHAVE_DECL_GETIFADDRS=OFF"
     templates:
       - check+secp256k1
     timeout: 3600
     env:
       QEMU_LD_PREFIX: /usr/arm-linux-gnueabihf

   build-make-generator:
     generator:
       name: 'Unix Makefiles'
       command: make
       flags:
         - '-k'
     templates:
       - check+secp256k1
     timeout: 1200

   build-master:
     Werror: true
     targets:
       - - all
         - install
         - install-secp256k1
       - - check-extended
         - check-upgrade-activated-extended
     timeout: 4800

   build-native-osx:
     templates:
       - check+secp256k1
     timeout: 3600

   build-osx:
     cross_build:
       static_depends: osx
       toolchain: OSX
     targets:
       - - all
         - install
         - install-secp256k1
         - install-tests
     post_build: |
       export PYTHONPATH="${TOPLEVEL}/depends/x86_64-apple-darwin16/native/lib/python3/dist-packages:${PYTHONPATH:-}"
       ninja osx-dmg
     timeout: 3600
     artifacts:
       Bitcoin-ABC.dmg: Bitcoin-ABC.dmg

   build-secp256k1:
     cmake_flags:
       - '-DSECP256K1_ENABLE_MODULE_ECDH=ON'
       - '-DSECP256K1_ENABLE_MODULE_MULTISET=ON'
     templates:
       - secp256k1
     timeout: 600

-  build-secp256k1-without-endomorphism:
-    cmake_flags:
-      - "-DSECP256K1_ENABLE_ENDOMORPHISM=OFF"
-    templates:
-      - secp256k1
-    timeout: 600
-
   build-secp256k1-java:
     cmake_flags:
       - '-DSECP256K1_ENABLE_MODULE_ECDH=ON'
       - '-DSECP256K1_ENABLE_JNI=ON'
       - '-DUSE_JEMALLOC=OFF'
     templates:
       - secp256k1
     timeout: 600

   build-tsan:
     Werror: true
     clang: true
     fail_fast: true
     cmake_flags:
       - '-DENABLE_SANITIZERS=thread'
     targets:
       - - all
         - install
       - - check
         - check-functional
     timeout: 2400
     env:
       TSAN_OPTIONS: log_path=stdout

   build-ubsan:
     Werror: true
     clang: true
     fail_fast: true
     cmake_flags:
       - '-DENABLE_SANITIZERS=undefined'
     templates:
       - check+secp256k1
     timeout: 2400
     env:
       UBSAN_OPTIONS: log_path=stdout

   build-win64:
     cross_build:
       static_depends: win64
       toolchain: Win64
     cmake_flags:
       - "-DBUILD_BITCOIN_SEEDER=OFF"
       - "-DCPACK_STRIP_FILES=ON"
       - "-DUSE_JEMALLOC=OFF"
     targets:
       - - all
         - install
         - install-secp256k1
         - install-tests
       - - package
     post_build: |
wine "${ARTIFACT_DIR}/bin/test_bitcoin.exe" --run_test=\!radix_tests,rcu_tests timeout: 3600 artifacts: bitcoin-abc-*-x86_64-w64-mingw32.exe: bitcoin-abc-x86_64-w64-mingw32.exe build-without-bip70: Werror: true cmake_flags: - '-DENABLE_BIP70=OFF' targets: - - all - install - - check - check-functional timeout: 1800 build-without-cli: Werror: true cmake_flags: - '-DBUILD_BITCOIN_CLI=OFF' targets: - - all - install - - check-functional timeout: 1200 build-without-qt: Werror: true cmake_flags: - '-DBUILD_BITCOIN_QT=OFF' targets: - - all - install - - check timeout: 1200 build-without-wallet: runOnDiff: true Werror: true cmake_flags: - '-DBUILD_BITCOIN_WALLET=OFF' targets: - - all - install - - check - check-functional timeout: 1200 build-without-zmq: Werror: true cmake_flags: - '-DBUILD_BITCOIN_ZMQ=OFF' targets: - - all - install - - check - check-functional timeout: 1800 check-buildbot: targets: - - check-buildbot timeout: 600 check-seeds: targets: - - bitcoind - bitcoin-cli post_build: | # Run on different ports to avoid a race where the rpc port used in the first run # may not be closed in time for the second to start. SEEDS_DIR="${TOPLEVEL}"/contrib/seeds RPC_PORT=18832 "${SEEDS_DIR}"/check-seeds.sh main 80 RPC_PORT=18833 "${SEEDS_DIR}"/check-seeds.sh test 70 timeout: 600 check-source-control-tools: cmake_flags: - '-DENABLE_SOURCE_CONTROL_TOOLS_TESTS=ON' targets: - - check-source-control-tools timeout: 600 gitian-linux: templates: - gitian_builds env: OS_NAME: linux gitian-osx: templates: - gitian_builds env: OS_NAME: osx gitian-win: templates: - gitian_builds env: OS_NAME: win lint-circular-dependencies: runOnDiff: true script: test/lint/lint-circular-dependencies.sh diff --git a/src/secp256k1/.travis.yml b/src/secp256k1/.travis.yml index e4da58adb..0200cb0ab 100644 --- a/src/secp256k1/.travis.yml +++ b/src/secp256k1/.travis.yml @@ -1,127 +1,118 @@ language: c os: - linux - osx dist: bionic # Valgrind currently supports upto macOS 10.13, the latest xcode of that version is 10.1 osx_image: xcode10.1 addons: apt: packages: - gcc-multilib - libc6-dbg:i386 - libgmp-dev - libgmp-dev:i386 - libtool-bin - ninja-build - valgrind install: - if [ "${TRAVIS_CPU_ARCH}" = "amd64" ]; then ./travis/install_cmake.sh; fi - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install gcc@9 gmp ninja openssl valgrind; fi cache: directories: - /opt/cmake compiler: - clang - gcc env: global: - WIDEMUL=auto - BIGNUM=gmp - - ENDOMORPHISM=no - STATICPRECOMPUTATION=yes - ECMULTGENPRECISION=auto - ASM=no - AUTOTOOLS_TARGET=check - CMAKE_TARGET=check-secp256k1 - AUTOTOOLS_EXTRA_FLAGS= - CMAKE_EXTRA_FLAGS= - WITH_VALGRIND=yes - RUN_VALGRIND=no - HOST= - ECDH=no - RECOVERY=no - SCHNORR=yes - SCHNORRSIG=no - EXPERIMENTAL=no - JNI=no - OPENSSL_TESTS=auto - MULTISET=no - CTIMETEST=yes - BENCH=yes - ITERS=2 jobs: - WIDEMUL=int64 RECOVERY=yes - WIDEMUL=int64 ECDH=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes - - WIDEMUL=int64 ENDOMORPHISM=yes - WIDEMUL=int128 - WIDEMUL=int128 RECOVERY=yes EXPERIMENTAL=yes SCHNORRSIG=yes - - WIDEMUL=int128 ENDOMORPHISM=yes - - WIDEMUL=int128 ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes + - WIDEMUL=int128 ECDH=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes - WIDEMUL=int128 ASM=x86_64 - - WIDEMUL=int128 ENDOMORPHISM=yes ASM=x86_64 - BIGNUM=no - - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes + - BIGNUM=no RECOVERY=yes EXPERIMENTAL=yes MULTISET=yes SCHNORRSIG=yes - BIGNUM=no STATICPRECOMPUTATION=no - 
     - AUTOTOOLS_TARGET=distcheck CMAKE_TARGET=install WITH_VALGRIND=no CTIMETEST=no BENCH=no
     - AUTOTOOLS_EXTRA_FLAGS=CPPFLAGS=-DDETERMINISTIC CMAKE_EXTRA_FLAGS=-DCMAKE_C_FLAGS=-DDETERMINISTIC
     - AUTOTOOLS_EXTRA_FLAGS=CFLAGS=-O0 CMAKE_EXTRA_FLAGS=-DCMAKE_BUILD_TYPE=Debug CTIMETEST=no
     - AUTOTOOLS_TARGET=check-java CMAKE_TARGET=check-secp256k1-java JNI=yes ECDH=yes EXPERIMENTAL=yes WITH_VALGRIND=no CTIMETEST=no BENCH=no
     - ECMULTGENPRECISION=2
     - ECMULTGENPRECISION=8
     - RUN_VALGRIND=yes
-      BIGNUM=no ENDOMORPHISM=yes ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes OPENSSL_TESTS=no MULTISET=yes
-      AUTOTOOLS_TARGET=
-      CMAKE_EXTRA_FLAGS=-DCMAKE_C_FLAGS=-DVALGRIND CMAKE_TARGET="secp256k1-tests secp256k1-exhaustive_tests"
-    # The same as above but without endomorphism.
-    - RUN_VALGRIND=yes
-      BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes OPENSSL_TESTS=no MULTISET=yes
+      BIGNUM=no ASM=x86_64 EXPERIMENTAL=yes ECDH=yes RECOVERY=yes OPENSSL_TESTS=no MULTISET=yes
       AUTOTOOLS_TARGET=
       CMAKE_EXTRA_FLAGS=-DCMAKE_C_FLAGS=-DVALGRIND CMAKE_TARGET="secp256k1-tests secp256k1-exhaustive_tests"
     - SCHNORR=no

 jobs:
   fast_finish: true
   include:
     - compiler: clang
       os: linux
-      env: HOST=i686-linux-gnu ENDOMORPHISM=yes OPENSSL_TESTS=no
+      env: HOST=i686-linux-gnu OPENSSL_TESTS=no
     - compiler: clang
       os: linux
       env: HOST=i686-linux-gnu BIGNUM=no OPENSSL_TESTS=no
     - compiler: gcc
       os: linux
-      env: HOST=i686-linux-gnu ENDOMORPHISM=yes BIGNUM=no OPENSSL_TESTS=no
+      env: HOST=i686-linux-gnu BIGNUM=no OPENSSL_TESTS=no
     - compiler: gcc
       os: linux
       env: HOST=i686-linux-gnu OPENSSL_TESTS=no
     # S390x build (big endian system)
     - dist: focal
       compiler: gcc
       env: HOST=s390x-unknown-linux-gnu OPENSSL_TESTS=no ECDH=yes RECOVERY=yes EXPERIMENTAL=yes MULTISET=yes CTIMETEST=no
       arch: s390x
       addons:
         apt:
           packages:
             - cmake
             - libgmp-dev
             - libtool-bin
             - ninja-build
             - valgrind

 before_script:
   # This limits the iterations in the benchmarks below to ITERS iterations.
   - export SECP256K1_BENCH_ITERS="$ITERS"

 # Travis auto-terminates jobs that go for 10 minutes without printing to stdout,
 # but travis_wait doesn't work well with forking programs like valgrind
 # (https://docs.travis-ci.com/user/common-build-problems/#build-times-out-because-no-output-was-received https://github.com/bitcoin-core/secp256k1/pull/750#issuecomment-623476860)
 script:
   - function keep_alive() { while true; do echo -en "\a"; sleep 60; done }
   - keep_alive &
   - ./travis/build_autotools.sh
   - ./travis/build_cmake.sh
   - kill %keep_alive

 after_script:
   - valgrind --version
diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt
index 66042c435..24fa0747d 100644
--- a/src/secp256k1/CMakeLists.txt
+++ b/src/secp256k1/CMakeLists.txt
@@ -1,400 +1,394 @@
 # Copyright (c) 2017 The Bitcoin developers

 cmake_minimum_required(VERSION 3.16)
 project(secp256k1 LANGUAGES C VERSION 0.1.0)

 # Add path for custom modules when building as a standalone project
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)

 # Default to RelWithDebInfo configuration
 if(NOT CMAKE_BUILD_TYPE)
   set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING
     "Select the configuration for the build" FORCE)
   set(__NO_USER_CMAKE_BUILD_TYPE ON CACHE BOOL
     "True if the user didn't set a build type on the command line")
 endif()

 option(SECP256K1_ENABLE_COVERAGE "Enable coverage" OFF)
 option(SECP256K1_ENABLE_BRANCH_COVERAGE "Enable branch coverage" OFF)

 include(AddCompilerFlags)

 if(SECP256K1_ENABLE_COVERAGE)
   include(Coverage)

   enable_coverage(${SECP256K1_ENABLE_BRANCH_COVERAGE})

   exclude_from_coverage("${CMAKE_CURRENT_SOURCE_DIR}/src/bench")

   # If no build type is manually defined, override the optimization level.
   # Otherwise, alert the user that the coverage result might be useless.
   if(__NO_USER_CMAKE_BUILD_TYPE)
     set_c_optimization_level(0)
   else()
     message(WARNING "It is advised to not enforce CMAKE_BUILD_TYPE to get the best coverage results")
   endif()

   set(COVERAGE 1)
 endif()

 # libsecp256k1 uses a different set of flags.
 add_c_compiler_flags(
   -pedantic
   -Wall
   -Wextra
   -Wcast-align
   -Wshadow
   -Wundef
   -Wno-unused-function
   -Wno-overlength-strings
   -std=c89
   -Wnested-externs
   -Wstrict-prototypes
   -Wno-long-long
 )

 # Default visibility is hidden on all targets.
 set(CMAKE_C_VISIBILITY_PRESET hidden)

 include_directories(
   .
   src
   # For the config
   ${CMAKE_CURRENT_BINARY_DIR}/src
 )

 # The library
 add_library(secp256k1 src/secp256k1.c)
 target_include_directories(secp256k1 PUBLIC include)

 set(SECP256K1_PUBLIC_HEADERS
   include/secp256k1.h
   include/secp256k1_preallocated.h
 )

 option(SECP256K1_ENABLE_BIGNUM "Use the GMP bignum implementation" OFF)
 if(SECP256K1_ENABLE_BIGNUM)
   # We need to link in GMP
   find_package(GMP REQUIRED)
   target_link_libraries(secp256k1 GMP::gmp)
   set(USE_NUM_GMP 1)
   set(USE_FIELD_INV_NUM 1)
   set(USE_SCALAR_INV_NUM 1)
 else()
   set(USE_NUM_NONE 1)
   set(USE_FIELD_INV_BUILTIN 1)
   set(USE_SCALAR_INV_BUILTIN 1)
 endif()

 # Guess the target architecture, among those with supported ASM.
 # First check if the CMAKE_C_COMPILER_TARGET is set (should be when
 # cross compiling), then CMAKE_SYSTEM_PROCESSOR as a fallback if meaningful
 # (this is not the case for ARM, as the content is highly non-standard).
 if(CMAKE_C_COMPILER_TARGET MATCHES "x86_64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
   set(SECP256K1_ASM_BUILD_TARGET "x86_64")
   set(SECP256K1_DEFAULT_USE_ASM ON)
 elseif(CMAKE_C_COMPILER_TARGET MATCHES "arm-linux-gnueabihf")
   set(SECP256K1_ASM_BUILD_TARGET "arm-linux-gnueabihf")
   set(SECP256K1_DEFAULT_USE_ASM ON)
 endif()

 # Enable ASM by default only if we are building for a compatible target.
 # The user can still enable/disable it manually if needed.
 option(SECP256K1_USE_ASM "Use assembly" ${SECP256K1_DEFAULT_USE_ASM})

 if(SECP256K1_USE_ASM)
   macro(unsupported_asm_error)
     message(FATAL_ERROR
       "Assembly is enabled, but not supported for your target architecture."
       "Re-run cmake with -DSECP256K1_USE_ASM=OFF to disable ASM support."
     )
   endmacro()

   if(SECP256K1_ASM_BUILD_TARGET MATCHES "x86_64")
     # We check if amd64 asm is supported.
     check_c_source_compiles("
       #include <stdint.h>
       int main() {
         uint64_t a = 11, tmp;
         __asm__ __volatile__(\"movq \$0x100000000,%1; mulq %%rsi\" : \"+a\"(a) : \"S\"(tmp) : \"cc\", \"%rdx\");
         return 0;
       }
     " USE_ASM_X86_64)

     if(NOT USE_ASM_X86_64)
       unsupported_asm_error()
     endif()
   elseif(SECP256K1_ASM_BUILD_TARGET MATCHES "arm-linux-gnueabihf")
     enable_language(ASM)
     set(USE_EXTERNAL_ASM 1)
     add_library(secp256k1_common src/asm/field_10x26_arm.s)
     target_link_libraries(secp256k1 secp256k1_common)
   else()
     unsupported_asm_error()
   endif()
 endif()

 set(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY "" CACHE STRING
   "Test-only override of the (autodetected by the C code) \"widemul\" setting (can be int64 or int128)")
 if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY STREQUAL "int128")
   message(STATUS "Force the use of the (unsigned) __int128 based wide multiplication implementation")
   target_compile_definitions(secp256k1 PUBLIC USE_FORCE_WIDEMUL_INT128=1)
 elseif(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY STREQUAL "int64")
   message(STATUS "Force the use of the (u)int64_t based wide multiplication implementation")
   target_compile_definitions(secp256k1 PUBLIC USE_FORCE_WIDEMUL_INT64=1)
 endif()

 option(SECP256K1_BUILD_TEST "Build secp256k1's unit tests" ON)

 include(CMakeDependentOption)
 cmake_dependent_option(SECP256K1_BUILD_OPENSSL_TESTS
   "Build the OpenSSL tests"
   ON
   SECP256K1_BUILD_TEST
   ON
 )
 if(SECP256K1_BUILD_OPENSSL_TESTS)
   include(BrewHelper)
   find_brew_prefix(OPENSSL_ROOT_DIR openssl)

   find_package(OpenSSL COMPONENTS Crypto)
   if(NOT OpenSSL_FOUND)
     message(FATAL_ERROR
       "OpenSSL is not found, but is required for some tests."
       "You can disable them by passing -DSECP256K1_BUILD_OPENSSL_TESTS=OFF."
     )
   endif()

   set(ENABLE_OPENSSL_TESTS 1)
 endif()

 # Executables internal to secp256k1 need to have the HAVE_CONFIG_H define set.
 # For convenience, we wrap this into a function.
 function(link_secp256k1_internal NAME)
   target_link_libraries(${NAME} secp256k1)
   target_compile_definitions(${NAME} PRIVATE HAVE_CONFIG_H SECP256K1_BUILD)
   if(SECP256K1_BUILD_OPENSSL_TESTS)
     target_link_libraries(${NAME} OpenSSL::Crypto)
   endif()
 endfunction(link_secp256k1_internal)

 include(InstallationHelper)

 # Phony target to build benchmarks
 add_custom_target(bench-secp256k1)

 function(add_secp256k1_bench NAME)
   set(EXECUTABLE_NAME "${NAME}-bench")
   add_executable(${EXECUTABLE_NAME} ${ARGN})
   link_secp256k1_internal(${EXECUTABLE_NAME})

   set(BENCH_NAME "bench-secp256k1-${NAME}")
   add_custom_target(${BENCH_NAME}
     COMMENT "Benchmarking libsecp256k1 ${NAME}"
     COMMAND ${EXECUTABLE_NAME}
     USES_TERMINAL
   )
   add_dependencies(bench-secp256k1 ${BENCH_NAME})

   install_target("${EXECUTABLE_NAME}"
     COMPONENT secp256k1-bench
     EXCLUDE_FROM_ALL
   )
 endfunction(add_secp256k1_bench)

 # ECDH module
 option(SECP256K1_ENABLE_MODULE_ECDH "Build libsecp256k1's ECDH module" OFF)
 if(SECP256K1_ENABLE_MODULE_ECDH)
   set(ENABLE_MODULE_ECDH 1)
   add_secp256k1_bench(ecdh src/bench_ecdh.c)
   list(APPEND SECP256K1_PUBLIC_HEADERS include/secp256k1_ecdh.h)
 endif()

 # MultiSet module
 option(SECP256K1_ENABLE_MODULE_MULTISET "Build libsecp256k1's MULTISET module" OFF)
 if(SECP256K1_ENABLE_MODULE_MULTISET)
   set(ENABLE_MODULE_MULTISET 1)
   add_secp256k1_bench(multiset src/bench_multiset.c)
   list(APPEND SECP256K1_PUBLIC_HEADERS include/secp256k1_multiset.h)
 endif()

 # Recovery module
 option(SECP256K1_ENABLE_MODULE_RECOVERY "Build libsecp256k1's recovery module" ON)
 if(SECP256K1_ENABLE_MODULE_RECOVERY)
   set(ENABLE_MODULE_RECOVERY 1)
   add_secp256k1_bench(recover src/bench_recover.c)
   list(APPEND SECP256K1_PUBLIC_HEADERS include/secp256k1_recovery.h)
 endif()

 # Schnorr module
 option(SECP256K1_ENABLE_MODULE_SCHNORR "Build libsecp256k1's Schnorr module" ON)
 if(SECP256K1_ENABLE_MODULE_SCHNORR)
   set(ENABLE_MODULE_SCHNORR 1)
   list(APPEND SECP256K1_PUBLIC_HEADERS include/secp256k1_schnorr.h)
 endif()

 # Extrakeys module
 option(SECP256K1_ENABLE_MODULE_EXTRAKEYS "Build libsecp256k1's Extrakeys module" OFF)
 if(SECP256K1_ENABLE_MODULE_EXTRAKEYS)
   set(ENABLE_MODULE_EXTRAKEYS 1)
   list(APPEND SECP256K1_PUBLIC_HEADERS include/secp256k1_extrakeys.h)
 endif()

 # Schnorrsig module
 option(SECP256K1_ENABLE_MODULE_SCHNORRSIG "Build libsecp256k1's Schnorrsig module" OFF)
 if(SECP256K1_ENABLE_MODULE_SCHNORRSIG)
   if(NOT SECP256K1_ENABLE_MODULE_EXTRAKEYS)
     message(FATAL_ERROR "The Schnorrsig module requires Extrakeys. Try running cmake using -DSECP256K1_ENABLE_MODULE_EXTRAKEYS=On")
   endif()
   set(ENABLE_MODULE_SCHNORRSIG 1)
   add_secp256k1_bench(schnorrsig src/bench_schnorrsig.c)
   list(APPEND SECP256K1_PUBLIC_HEADERS include/secp256k1_schnorrsig.h)
 endif()

 # External default callbacks
 option(SECP256K1_ENABLE_EXTERNAL_DEFAULT_CALLBACKS "Enable external default callbacks" OFF)
 if(SECP256K1_ENABLE_EXTERNAL_DEFAULT_CALLBACKS)
   set(USE_EXTERNAL_DEFAULT_CALLBACKS 1)
 endif()

-# Endomorphism
-option(SECP256K1_ENABLE_ENDOMORPHISM "Enable endomorphism" ON)
-if(SECP256K1_ENABLE_ENDOMORPHISM)
-  set(USE_ENDOMORPHISM 1)
-endif()
-
 # Make the ecmult window size customizable.
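 # Illustration (editorial, not part of the patched file): per the
 # --with-ecmult-window help text in configure.ac of this same patch, the
 # verification table stores 2^(SIZE-1) * 64 bytes now that the endomorphism
 # is always enabled. The default window size of 15 below thus yields
 # 2^14 * 64 bytes = 1 MiB, and each increment of the window size doubles
 # the table.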
 set(SECP256K1_ECMULT_WINDOW_SIZE 15 CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24].")
 if(${SECP256K1_ECMULT_WINDOW_SIZE} LESS 2 OR ${SECP256K1_ECMULT_WINDOW_SIZE} GREATER 24)
   message(FATAL_ERROR "SECP256K1_ECMULT_WINDOW_SIZE must be an integer in range [2..24]")
 endif()

 set(SECP256K1_ECMULT_GEN_PRECISION 4 CACHE STRING "Precision bits to tune the precomputed table size for signing.")
 set(VALID_PRECISIONS 2 4 8)
 if(NOT ${SECP256K1_ECMULT_GEN_PRECISION} IN_LIST VALID_PRECISIONS)
   message(FATAL_ERROR "SECP256K1_ECMULT_GEN_PRECISION must be 2, 4 or 8")
 endif()

 # Static precomputation for elliptic curve multiplication
 option(SECP256K1_ECMULT_STATIC_PRECOMPUTATION "Precompute libsecp256k1's elliptic curve multiplication tables" ON)
 if(SECP256K1_ECMULT_STATIC_PRECOMPUTATION)
   set(USE_ECMULT_STATIC_PRECOMPUTATION 1)

   include(NativeExecutable)
   native_add_cmake_flags(
     "-DSECP256K1_ECMULT_WINDOW_SIZE=${SECP256K1_ECMULT_WINDOW_SIZE}"
     "-DSECP256K1_ECMULT_GEN_PRECISION=${SECP256K1_ECMULT_GEN_PRECISION}"
     "-DSECP256K1_USE_ASM=OFF"
     "-DSECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY=${SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY}"
     "-DSECP256K1_BUILD_OPENSSL_TESTS=OFF"
   )
   add_native_executable(gen_context src/gen_context.c)

   add_custom_command(
     OUTPUT src/ecmult_static_context.h
     COMMAND gen_context
   )

   target_sources(secp256k1 PRIVATE src/ecmult_static_context.h)
 endif()

 # If this project is not the top level project, then don't install by default
 get_directory_property(SECP256K1_PARENT_DIRECTORY PARENT_DIRECTORY)
 if(SECP256K1_PARENT_DIRECTORY)
   set(SECP256K1_INSTALL_EXCLUDE_FROM_ALL EXCLUDE_FROM_ALL)
 endif()

 if(BUILD_SHARED_LIBS)
   install_shared_library(secp256k1
     PUBLIC_HEADER ${SECP256K1_PUBLIC_HEADERS}
     ${SECP256K1_INSTALL_EXCLUDE_FROM_ALL}
   )
 else()
   set_property(TARGET secp256k1 PROPERTY PUBLIC_HEADER ${SECP256K1_PUBLIC_HEADERS})
   install_target(secp256k1 ${SECP256K1_INSTALL_EXCLUDE_FROM_ALL})
 endif()

 # Generate the config
 configure_file(src/libsecp256k1-config.h.cmake.in src/libsecp256k1-config.h ESCAPE_QUOTES)
 target_compile_definitions(secp256k1 PRIVATE HAVE_CONFIG_H SECP256K1_BUILD)

 # Build the Java binding
 option(SECP256K1_ENABLE_JNI "Enable the Java Native Interface binding" OFF)
 if(SECP256K1_ENABLE_JNI)
   if(NOT SECP256K1_ENABLE_MODULE_ECDH)
     message(FATAL_ERROR "The secp256k1 JNI support requires ECDH. Try again with -DSECP256K1_ENABLE_MODULE_ECDH=ON.")
   endif()

   find_package(Java REQUIRED)
   find_package(JNI REQUIRED)
   include(UseJava)

   add_library(secp256k1_jni SHARED
     src/java/org_bitcoin_NativeSecp256k1.c
     src/java/org_bitcoin_Secp256k1Context.c
   )

   install_shared_library(secp256k1_jni ${SECP256K1_INSTALL_EXCLUDE_FROM_ALL})

   target_include_directories(secp256k1_jni PUBLIC ${JNI_INCLUDE_DIRS})

   # As per CMake documentation: the POSITION_INDEPENDENT_CODE property is set
   # when a target is created. It defaults to True for SHARED or MODULE library
   # targets and False otherwise.
   # The secp256k1_jni library being shared, the property is set, and it will
   # build with PIC enabled. But the secp256k1 dependency might not have the
   # property set, so its associated source files won't be built with PIC
   # enabled. That would cause the linker to fail.
   # Forcing the property for the secp256k1 library fixes the issue.
   set_target_properties(secp256k1 PROPERTIES POSITION_INDEPENDENT_CODE ON)

   link_secp256k1_internal(secp256k1_jni)
 endif()

 # Tests
 if(SECP256K1_BUILD_TEST)
   include(TestSuite)
   create_test_suite(secp256k1)

   function(create_secp256k1_test NAME FILES)
     add_test_to_suite(secp256k1 ${NAME} EXCLUDE_FROM_ALL ${FILES})
     link_secp256k1_internal(${NAME})
   endfunction()

   create_secp256k1_test(secp256k1-tests src/tests.c)
   create_secp256k1_test(secp256k1-exhaustive_tests src/tests_exhaustive.c)

   # This should not be enabled at the same time as coverage is.
   # The VERIFY failure branch is not expected to be reached, so it would make
   # coverage appear lower if set.
   if(NOT SECP256K1_ENABLE_COVERAGE)
     target_compile_definitions(secp256k1-tests PRIVATE VERIFY)
     target_compile_definitions(secp256k1-exhaustive_tests PRIVATE VERIFY)
   endif()

   if(SECP256K1_ENABLE_JNI)
     set(SECP256k1_JNI_TEST_JAR "secp256k1-jni-test")

     set(CMAKE_JNI_TARGET TRUE)
     add_jar(secp256k1-jni-test-jar
       SOURCES
         src/java/org/bitcoin/NativeSecp256k1.java
         src/java/org/bitcoin/NativeSecp256k1Test.java
         src/java/org/bitcoin/NativeSecp256k1Util.java
         src/java/org/bitcoin/Secp256k1Context.java
       ENTRY_POINT org/bitcoin/NativeSecp256k1Test
       OUTPUT_NAME "${SECP256k1_JNI_TEST_JAR}"
     )
     add_dependencies(secp256k1-jni-test-jar secp256k1_jni)

     add_custom_target(check-secp256k1-java
       COMMAND
         "${Java_JAVA_EXECUTABLE}"
         "-Djava.library.path=${CMAKE_CURRENT_BINARY_DIR}"
         "-jar"
         "${SECP256k1_JNI_TEST_JAR}.jar"
       WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
     )
     add_dependencies(check-secp256k1-java secp256k1-jni-test-jar)
     add_dependencies(check-secp256k1 check-secp256k1-java)
   endif()
 endif(SECP256K1_BUILD_TEST)

 # Benchmarks
 add_secp256k1_bench(verify src/bench_verify.c)
 add_secp256k1_bench(sign src/bench_sign.c)
 add_secp256k1_bench(internal src/bench_internal.c)
 add_secp256k1_bench(ecmult src/bench_ecmult.c)
diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md
index 00a7a1db1..9e76554d3 100644
--- a/src/secp256k1/README.md
+++ b/src/secp256k1/README.md
@@ -1,146 +1,146 @@
 libsecp256k1
 ============

 [![Build Status](https://travis-ci.org/bitcoin-abc/secp256k1.svg?branch=master)](https://travis-ci.org/bitcoin-abc/secp256k1)

 Optimized C library for cryptographic operations on curve secp256k1.

 This library is used for consensus critical cryptographic operations on the Bitcoin Cash network. It is maintained within the Bitcoin ABC repository, and is mirrored as a separate repository for ease of reuse in other Bitcoin Cash projects. Developers who want to contribute may do so at [reviews.bitcoinabc.org](https://reviews.bitcoinabc.org/). Use at your own risk.

 This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin Cash system, and usage unlike Bitcoin's may be less well tested, verified, or may suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.

 Features:
 * secp256k1 ECDSA signing/verification and key generation.
 * secp256k1 Schnorr signing/verification ([Bitcoin Cash Schnorr variant](https://www.bitcoincash.org/spec/2019-05-15-schnorr.html)).
 * Additive and multiplicative tweaking of secret/public keys.
 * Serialization/parsing of secret keys, public keys, signatures.
 * Constant time, constant memory access signing and pubkey generation.
 * Derandomized ECDSA (via RFC6979 or with a caller provided function).
 * Very efficient implementation.
 * Suitable for embedded systems.
 * Optional module for public key recovery.
 * Optional module for ECDH key exchange (experimental).
 * Optional module for multiset hash (experimental).

 Experimental features have not received enough scrutiny to satisfy the standard of quality of this library but are made available for testing and review by the community. The APIs of these features should not be considered stable.

 Implementation details
 ----------------------

 * General
   * No runtime heap allocation.
   * Extensive testing infrastructure.
   * Structured to facilitate review and analysis.
   * Intended to be portable to any system with a C89 compiler and uint64_t support.
   * No use of floating types.
   * Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
 * Field operations
   * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
     * Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys).
     * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
   * Field inverses and square roots using a sliding window over blocks of 1s (by Peter Dettman).
 * Scalar operations
   * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order.
     * Using 4 64-bit limbs (relying on __int128 support in the compiler).
     * Using 8 32-bit limbs.
 * Group operations
   * Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7).
   * Use addition between points in Jacobian and affine coordinates where possible.
   * Use a unified addition/doubling formula where necessary to avoid data-dependent branches.
   * Point/x comparison without a field inversion by comparison in the Jacobian coordinate space.
 * Point multiplication for verification (a*P + b*G).
   * Use wNAF notation for point multiplicands.
   * Use a much larger window for multiples of G, using precomputed multiples.
   * Use Shamir's trick to do the multiplication with the public key and the generator simultaneously.
-  * Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
+  * Use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones.
 * Point multiplication for signing
   * Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions.
   * Intended to be completely free of timing sidechannels for secret-key operations (on reasonable hardware/toolchains).
     * Access the table with branch-free conditional moves so memory access is uniform.
     * No data-dependent branches.
   * Optional runtime blinding which attempts to frustrate differential power analysis.
   * The precomputed tables add and eventually subtract points for which no scalar (secret key) is known, preventing even an attacker with control over the secret key from controlling the data internally.

 Build steps
 -----------

 libsecp256k1 can be built using autotools:

 ```bash
 ./autogen.sh
 mkdir build
 cd build
 ../configure
 make
 make check
 sudo make install  # optional
 ```

 Or using CMake:

 ```bash
 mkdir build
 cd build
 cmake -GNinja ..
 ninja
 ninja check-secp256k1
 sudo ninja install  # optional
 ```

 Exhaustive tests
 -----------

     $ ./exhaustive_tests

 With valgrind, you might need to increase the max stack size:

     $ valgrind --max-stackframe=2500000 ./exhaustive_tests

 Test coverage
 -----------

 This library aims to have full coverage of the reachable lines and branches.

 __To create a test coverage report with autotools:__

 Configure with `--enable-coverage` (use of GCC is necessary):

     $ ./configure --enable-coverage

 Run the tests:

     $ make check

 To create a report, `gcovr` is recommended, as it includes branch coverage reporting:

     $ gcovr --exclude 'src/bench*' --print-summary

 To create an HTML report with coloured and annotated source code:

     $ gcovr --exclude 'src/bench*' --html --html-details -o coverage.html

 __To create a test coverage report with CMake:__

 Make sure you installed the dependencies first, and they are in your `PATH`: `c++filt`, `gcov`, `genhtml`, `lcov` and `python3`.

 Then run the build, tests and generate the coverage report with:

 ```bash
 mkdir coverage
 cd coverage
 cmake -GNinja .. \
   -DCMAKE_C_COMPILER=gcc \
   -DSECP256K1_ENABLE_COVERAGE=ON \
   -DSECP256K1_ENABLE_BRANCH_COVERAGE=ON  # optional
 ninja coverage-check-secp256k1
 ```

 The coverage report will be available by opening the file `check-secp256k1.coverage/index.html` with a web browser.

 Reporting a vulnerability
 ------------

 See [SECURITY.md](SECURITY.md)
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 196205f14..712a774ba 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -1,597 +1,586 @@
 AC_PREREQ([2.60])
 AC_INIT([libsecp256k1],[0.1])
 AC_CONFIG_AUX_DIR([build-aux])
 AC_CONFIG_MACRO_DIR([build-aux/m4])
 AC_CANONICAL_HOST
 AH_TOP([#ifndef LIBSECP256K1_CONFIG_H])
 AH_TOP([#define LIBSECP256K1_CONFIG_H])
 AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/])
 AM_INIT_AUTOMAKE([foreign subdir-objects])

 # Set -g if CFLAGS are not already set, which matches the default autoconf
 # behavior (see PROG_CC in the Autoconf manual) with the exception that we don't
 # set -O2 here because we set it in any case (see further down).
 : ${CFLAGS="-g"}
 LT_INIT

 dnl make the compilation flags quiet unless V=1 is used
 m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])

 PKG_PROG_PKG_CONFIG

 AC_PATH_TOOL(AR, ar)
 AC_PATH_TOOL(RANLIB, ranlib)
 AC_PATH_TOOL(STRIP, strip)
 AX_PROG_CC_FOR_BUILD

 AM_PROG_CC_C_O

 AC_PROG_CC_C89
 if test x"$ac_cv_prog_cc_c89" = x"no"; then
   AC_MSG_ERROR([c89 compiler support required])
 fi
 AM_PROG_AS

 case $host_os in
   *darwin*)
     if test x$cross_compiling != xyes; then
       AC_PATH_PROG([BREW],brew,)
       if test x$BREW != x; then
         dnl These Homebrew packages may be keg-only, meaning that they won't be found
         dnl in expected paths because they may conflict with system files. Ask
         dnl Homebrew where each one is located, then adjust paths accordingly.
         openssl_prefix=`$BREW --prefix openssl 2>/dev/null`
         gmp_prefix=`$BREW --prefix gmp 2>/dev/null`
         if test x$openssl_prefix != x; then
           PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH"
           export PKG_CONFIG_PATH
           CRYPTO_CPPFLAGS="-I$openssl_prefix/include"
         fi
         if test x$gmp_prefix != x; then
           GMP_CPPFLAGS="-I$gmp_prefix/include"
           GMP_LIBS="-L$gmp_prefix/lib"
         fi
       else
         AC_PATH_PROG([PORT],port,)
         dnl if homebrew isn't installed and macports is, add the macports default paths
         dnl as a last resort.
         if test x$PORT != x; then
           CPPFLAGS="$CPPFLAGS -isystem /opt/local/include"
           LDFLAGS="$LDFLAGS -L/opt/local/lib"
         fi
       fi
     fi
   ;;
 esac

 CFLAGS="-W $CFLAGS"

 warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings"
 saved_CFLAGS="$CFLAGS"
 CFLAGS="$warn_CFLAGS $CFLAGS"
 AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}])
 AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
     [ AC_MSG_RESULT([yes]) ],
     [ AC_MSG_RESULT([no])
       CFLAGS="$saved_CFLAGS"
     ])

 saved_CFLAGS="$CFLAGS"
 CFLAGS="-fvisibility=hidden $CFLAGS"
 AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden])
 AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
     [ AC_MSG_RESULT([yes]) ],
     [ AC_MSG_RESULT([no])
       CFLAGS="$saved_CFLAGS"
     ])

 AC_ARG_ENABLE(benchmark,
     AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]),
     [use_benchmark=$enableval],
     [use_benchmark=yes])

 AC_ARG_ENABLE(coverage,
     AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]),
     [enable_coverage=$enableval],
     [enable_coverage=no])

 AC_ARG_ENABLE(tests,
     AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]),
     [use_tests=$enableval],
     [use_tests=yes])

 AC_ARG_ENABLE(openssl_tests,
     AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests [default=auto]]),
     [enable_openssl_tests=$enableval],
     [enable_openssl_tests=auto])

 AC_ARG_ENABLE(experimental,
     AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]),
     [use_experimental=$enableval],
     [use_experimental=no])

 AC_ARG_ENABLE(exhaustive_tests,
     AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]),
     [use_exhaustive_tests=$enableval],
     [use_exhaustive_tests=yes])

-AC_ARG_ENABLE(endomorphism,
-    AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
-    [use_endomorphism=$enableval],
-    [use_endomorphism=no])
-
 AC_ARG_ENABLE(ecmult_static_precomputation,
     AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
     [use_ecmult_static_precomputation=$enableval],
     [use_ecmult_static_precomputation=auto])

 AC_ARG_ENABLE(module_ecdh,
     AS_HELP_STRING([--enable-module-ecdh],[enable ECDH shared secret computation (experimental)]),
     [enable_module_ecdh=$enableval],
     [enable_module_ecdh=no])

 AC_ARG_ENABLE(module_multiset,
     AS_HELP_STRING([--enable-module-multiset],[enable multiset operations (experimental)]),
     [enable_module_multiset=$enableval],
     [enable_module_multiset=no])

 AC_ARG_ENABLE(module_recovery,
     AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]),
     [enable_module_recovery=$enableval],
     [enable_module_recovery=no])

 AC_ARG_ENABLE(module_schnorr,
     AS_HELP_STRING([--enable-module-schnorr],[enable Schnorr signatures module [default=yes]]),
     [enable_module_schnorr=$enableval],
     [enable_module_schnorr=yes])

 AC_ARG_ENABLE(module_extrakeys,
     AS_HELP_STRING([--enable-module-extrakeys],[enable extrakeys module (experimental)]),
     [enable_module_extrakeys=$enableval],
     [enable_module_extrakeys=no])

 AC_ARG_ENABLE(module_schnorrsig,
     AS_HELP_STRING([--enable-module-schnorrsig],[enable schnorrsig module (experimental)]),
     [enable_module_schnorrsig=$enableval],
     [enable_module_schnorrsig=no])

 AC_ARG_ENABLE(external_default_callbacks,
     AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions [default=no]]),
     [use_external_default_callbacks=$enableval],
     [use_external_default_callbacks=no])
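 dnl Illustration (editorial, not part of the patched file): the modules
 dnl declared above as (experimental) are gated by --enable-experimental
 dnl further down, so a build exercising them would be configured e.g. as:
 dnl   ./configure --enable-experimental --enable-module-ecdh \
 dnl               --enable-module-multiset --enable-module-schnorrsig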
 dnl Test-only override of the (autodetected by the C code) "widemul" setting.
 dnl Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default).
 AC_ARG_WITH([test-override-wide-multiply], [], [set_widemul=$withval], [set_widemul=auto])

 AC_ARG_ENABLE(jni,
     AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]),
     [use_jni=$enableval],
     [use_jni=no])

 AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
 [bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto])

 AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
 [assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])

 AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
 [window size for ecmult precomputation for verification, specified as integer in range [2..24].]
 [Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
-[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
-[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
+[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
 ["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
 )],
 [req_ecmult_window=$withval], [req_ecmult_window=auto])

 AC_ARG_WITH([ecmult-gen-precision], [AS_HELP_STRING([--with-ecmult-gen-precision=2|4|8|auto],
 [Precision bits to tune the precomputed table size for signing.]
 [The size of the table is 32kB for 2 bits, 64kB for 4 bits, 512kB for 8 bits of precision.]
 [A larger table size usually results in possibly faster signing.]
 ["auto" is a reasonable setting for desktop machines (currently 4).
 [default=auto]]
 )],
 [req_ecmult_gen_precision=$withval], [req_ecmult_gen_precision=auto])

 AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto],
 [Build with extra checks for running inside Valgrind [default=auto]]
 )],
 [req_valgrind=$withval], [req_valgrind=auto])

 if test x"$req_valgrind" = x"no"; then
   enable_valgrind=no
 else
   AC_CHECK_HEADER([valgrind/memcheck.h], [enable_valgrind=yes], [
     if test x"$req_valgrind" = x"yes"; then
       AC_MSG_ERROR([Valgrind support explicitly requested but valgrind/memcheck.h header not available])
     fi
     enable_valgrind=no
   ], [])
 fi
 AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"])

 if test x"$enable_coverage" = x"yes"; then
     AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code])
     CFLAGS="-O0 --coverage $CFLAGS"
     LDFLAGS="--coverage $LDFLAGS"
 else
     CFLAGS="-O2 $CFLAGS"
 fi

 if test x"$use_ecmult_static_precomputation" != x"no"; then
   # Temporarily switch to an environment for the native compiler
   save_cross_compiling=$cross_compiling
   cross_compiling=no
   SAVE_CC="$CC"
   CC="$CC_FOR_BUILD"
   SAVE_CFLAGS="$CFLAGS"
   CFLAGS="$CFLAGS_FOR_BUILD"
   SAVE_CPPFLAGS="$CPPFLAGS"
   CPPFLAGS="$CPPFLAGS_FOR_BUILD"
   SAVE_LDFLAGS="$LDFLAGS"
   LDFLAGS="$LDFLAGS_FOR_BUILD"

   warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function"
   saved_CFLAGS="$CFLAGS"
   CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS"
   AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}])
   AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])],
     [ AC_MSG_RESULT([yes]) ],
     [ AC_MSG_RESULT([no])
       CFLAGS="$saved_CFLAGS"
     ])

   AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}])
   AC_RUN_IFELSE(
     [AC_LANG_PROGRAM([], [])],
     [working_native_cc=yes],
     [working_native_cc=no],[:])

   CFLAGS_FOR_BUILD="$CFLAGS"

   # Restore the environment
   cross_compiling=$save_cross_compiling
   CC="$SAVE_CC"
   CFLAGS="$SAVE_CFLAGS"
   CPPFLAGS="$SAVE_CPPFLAGS"
   LDFLAGS="$SAVE_LDFLAGS"

   if test x"$working_native_cc" = x"no"; then
     AC_MSG_RESULT([no])
     set_precomp=no
     m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.])
     if test x"$use_ecmult_static_precomputation" = x"yes"; then
       AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build])
     else
       AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries.
 please_set_for_build])
     fi
   else
     AC_MSG_RESULT([yes])
     set_precomp=yes
   fi
 else
   set_precomp=no
 fi

 if test x"$req_asm" = x"auto"; then
   SECP_64BIT_ASM_CHECK
   if test x"$has_64bit_asm" = x"yes"; then
     set_asm=x86_64
   fi
   if test x"$set_asm" = x; then
     set_asm=no
   fi
 else
   set_asm=$req_asm
   case $set_asm in
   x86_64)
     SECP_64BIT_ASM_CHECK
     if test x"$has_64bit_asm" != x"yes"; then
       AC_MSG_ERROR([x86_64 assembly optimization requested but not available])
     fi
     ;;
   arm)
     ;;
   no)
     ;;
   *)
     AC_MSG_ERROR([invalid assembly optimization selection])
     ;;
   esac
 fi

 if test x"$req_bignum" = x"auto"; then
   SECP_GMP_CHECK
   if test x"$has_gmp" = x"yes"; then
     set_bignum=gmp
   fi
   if test x"$set_bignum" = x; then
     set_bignum=no
   fi
 else
   set_bignum=$req_bignum
   case $set_bignum in
   gmp)
     SECP_GMP_CHECK
     if test x"$has_gmp" != x"yes"; then
       AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available])
     fi
     ;;
   no)
     ;;
   *)
     AC_MSG_ERROR([invalid bignum implementation selection])
     ;;
   esac
 fi

 # select assembly optimization
 use_external_asm=no

 case $set_asm in
 x86_64)
   AC_DEFINE(USE_ASM_X86_64, 1, [Define this symbol to enable x86_64 assembly optimizations])
   ;;
 arm)
   use_external_asm=yes
   ;;
 no)
   ;;
 *)
   AC_MSG_ERROR([invalid assembly optimizations])
   ;;
 esac

 # select wide multiplication implementation
 case $set_widemul in
 int128)
   AC_DEFINE(USE_FORCE_WIDEMUL_INT128, 1, [Define this symbol to force the use of the (unsigned) __int128 based wide multiplication implementation])
   ;;
 int64)
   AC_DEFINE(USE_FORCE_WIDEMUL_INT64, 1, [Define this symbol to force the use of the (u)int64_t based wide multiplication implementation])
   ;;
 auto)
   ;;
 *)
   AC_MSG_ERROR([invalid wide multiplication implementation])
   ;;
 esac

 # select bignum implementation
 case $set_bignum in
 gmp)
   AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed])
   AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num])
   AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation])
   AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation])
   ;;
 no)
   AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation])
   AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation])
   AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation])
   ;;
 *)
   AC_MSG_ERROR([invalid bignum implementation])
   ;;
 esac

 #set ecmult window size
 if test x"$req_ecmult_window" = x"auto"; then
   set_ecmult_window=15
 else
   set_ecmult_window=$req_ecmult_window
 fi

 error_window_size=['window size for ecmult precomputation not an integer in range [2..24] or "auto"']
 case $set_ecmult_window in
 ''|*[[!0-9]]*)
   # no valid integer
   AC_MSG_ERROR($error_window_size)
   ;;
 *)
   if test "$set_ecmult_window" -lt 2 -o "$set_ecmult_window" -gt 24 ; then
     # not in range
     AC_MSG_ERROR($error_window_size)
   fi
   AC_DEFINE_UNQUOTED(ECMULT_WINDOW_SIZE, $set_ecmult_window, [Set window size for ecmult precomputation])
   ;;
 esac

 #set ecmult gen precision
 if test x"$req_ecmult_gen_precision" = x"auto"; then
   set_ecmult_gen_precision=4
 else
   set_ecmult_gen_precision=$req_ecmult_gen_precision
 fi

 case $set_ecmult_gen_precision in
 2|4|8)
   AC_DEFINE_UNQUOTED(ECMULT_GEN_PREC_BITS, $set_ecmult_gen_precision, [Set ecmult gen precision bits])
   ;;
 *)
   AC_MSG_ERROR(['ecmult gen precision not 2, 4, 8 or "auto"'])
   ;;
 esac

 if test x"$use_tests" = x"yes"; then
   SECP_OPENSSL_CHECK
   if test x"$has_openssl_ec" = x"yes"; then
     if test x"$enable_openssl_tests" != x"no"; then
       AC_DEFINE(ENABLE_OPENSSL_TESTS, 1, [Define this symbol if OpenSSL EC functions are available])
       SECP_TEST_INCLUDES="$SSL_CFLAGS $CRYPTO_CFLAGS $CRYPTO_CPPFLAGS"
       SECP_TEST_LIBS="$CRYPTO_LIBS"

       case $host in
       *mingw*)
         SECP_TEST_LIBS="$SECP_TEST_LIBS -lgdi32"
         ;;
       esac
     fi
   else
     if test x"$enable_openssl_tests" = x"yes"; then
       AC_MSG_ERROR([OpenSSL tests requested but OpenSSL with EC support is not available])
     fi
   fi
 else
   if test x"$enable_openssl_tests" = x"yes"; then
     AC_MSG_ERROR([OpenSSL tests requested but tests are not enabled])
   fi
 fi

 if test x"$use_jni" != x"no"; then
   AX_JNI_INCLUDE_DIR
   have_jni_dependencies=yes
   if test x"$enable_module_ecdh" = x"no"; then
     have_jni_dependencies=no
   fi
   if test "x$JNI_INCLUDE_DIRS" = "x"; then
     have_jni_dependencies=no
   fi
   if test "x$have_jni_dependencies" = "xno"; then
     if test x"$use_jni" = x"yes"; then
       AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and try again.])
     fi
     AC_MSG_WARN([jni headers/dependencies not found. jni support disabled])
     use_jni=no
   else
     use_jni=yes
     for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS; do
       JNI_INCLUDES="$JNI_INCLUDES -I$JNI_INCLUDE_DIR"
     done
   fi
 fi

 if test x"$set_bignum" = x"gmp"; then
   SECP_LIBS="$SECP_LIBS $GMP_LIBS"
   SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS"
 fi

-if test x"$use_endomorphism" = x"yes"; then
-  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
-fi
-
 if test x"$set_precomp" = x"yes"; then
   AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table])
 fi

 if test x"$enable_module_ecdh" = x"yes"; then
   AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module])
 fi

 if test x"$enable_module_multiset" = x"yes"; then
   AC_DEFINE(ENABLE_MODULE_MULTISET, 1, [Define this symbol to enable the multiset module])
 fi

 if test x"$enable_module_recovery" = x"yes"; then
   AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module])
 fi

 if test x"$enable_module_schnorr" = x"yes"; then
   AC_DEFINE(ENABLE_MODULE_SCHNORR, 1, [Define this symbol to enable the Schnorr signature module])
 fi

 if test x"$enable_module_schnorrsig" = x"yes"; then
   AC_DEFINE(ENABLE_MODULE_SCHNORRSIG, 1, [Define this symbol to enable the schnorrsig module])
   enable_module_extrakeys=yes
 fi

 # Test if extrakeys is set after the schnorrsig module to allow the schnorrsig
 # module to set enable_module_extrakeys=yes
 if test x"$enable_module_extrakeys" = x"yes"; then
   AC_DEFINE(ENABLE_MODULE_EXTRAKEYS, 1, [Define this symbol to enable the extrakeys module])
 fi

 if test x"$use_external_asm" = x"yes"; then
   AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
 fi

 if test x"$use_external_default_callbacks" = x"yes"; then
   AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used])
 fi

 if test x"$enable_experimental" = x"yes"; then
   AC_MSG_NOTICE([******])
   AC_MSG_NOTICE([WARNING: experimental build])
   AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.])
   AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh])
   AC_MSG_NOTICE([Building extrakeys module: $enable_module_extrakeys])
   AC_MSG_NOTICE([Building schnorrsig module: $enable_module_schnorrsig])
   AC_MSG_NOTICE([******])
 else
   if test x"$enable_module_ecdh" = x"yes"; then
     AC_MSG_ERROR([ECDH module is experimental.
 Use --enable-experimental to allow.])
   fi
   if test x"$enable_module_multiset" = x"yes"; then
     AC_MSG_ERROR([Multiset module is experimental. Use --enable-experimental to allow.])
   fi
   if test x"$enable_module_extrakeys" = x"yes"; then
     AC_MSG_ERROR([extrakeys module is experimental. Use --enable-experimental to allow.])
   fi
   if test x"$enable_module_schnorrsig" = x"yes"; then
     AC_MSG_ERROR([schnorrsig module is experimental. Use --enable-experimental to allow.])
   fi
   if test x"$set_asm" = x"arm"; then
     AC_MSG_ERROR([ARM assembly optimization is experimental. Use --enable-experimental to allow.])
   fi
 fi

 AC_CONFIG_HEADERS([src/libsecp256k1-config.h])
 AC_CONFIG_FILES([Makefile libsecp256k1.pc])
 AC_SUBST(JNI_INCLUDES)
 AC_SUBST(SECP_INCLUDES)
 AC_SUBST(SECP_LIBS)
 AC_SUBST(SECP_TEST_LIBS)
 AC_SUBST(SECP_TEST_INCLUDES)
 AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"])
 AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"])
 AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"])
 AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"])
 AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"])
 AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
 AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"])
 AM_CONDITIONAL([ENABLE_MODULE_MULTISET], [test x"$enable_module_multiset" = x"yes"])
 AM_CONDITIONAL([ENABLE_MODULE_SCHNORR], [test x"$enable_module_schnorr" = x"yes"])
 AM_CONDITIONAL([ENABLE_MODULE_EXTRAKEYS], [test x"$enable_module_extrakeys" = x"yes"])
 AM_CONDITIONAL([ENABLE_MODULE_SCHNORRSIG], [test x"$enable_module_schnorrsig" = x"yes"])
 AM_CONDITIONAL([USE_JNI], [test x"$use_jni" = x"yes"])
 AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"])
 AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"])

 dnl make sure nothing new is exported so that we don't break the cache
 PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH"
 unset PKG_CONFIG_PATH
 PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP"

 AC_OUTPUT

 echo
 echo "Build Options:"
-echo "  with endomorphism       = $use_endomorphism"
 echo "  with ecmult precomp     = $set_precomp"
 echo "  with external callbacks = $use_external_default_callbacks"
 echo "  with jni                = $use_jni"
 echo "  with benchmarks         = $use_benchmark"
 echo "  with coverage           = $enable_coverage"
 echo "  module ecdh             = $enable_module_ecdh"
 echo "  module recovery         = $enable_module_recovery"
 echo "  module multiset         = $enable_module_multiset"
 echo "  module schnorr          = $enable_module_schnorr"
 echo "  module extrakeys        = $enable_module_extrakeys"
 echo "  module schnorrsig       = $enable_module_schnorrsig"
 echo
 echo "  asm                     = $set_asm"
 echo "  bignum                  = $set_bignum"
 echo "  ecmult window size      = $set_ecmult_window"
 echo "  ecmult gen prec. bits   = $set_ecmult_gen_precision"
 dnl Hide test-only options unless they're used.
if test x"$set_widemul" != xauto; then echo " wide multiplication = $set_widemul" fi echo echo " valgrind = $enable_valgrind" echo " CC = $CC" echo " CFLAGS = $CFLAGS" echo " CPPFLAGS = $CPPFLAGS" echo " LDFLAGS = $LDFLAGS" echo diff --git a/src/secp256k1/src/basic-config.h b/src/secp256k1/src/basic-config.h index 83dbe6f25..b0d82e89b 100644 --- a/src/secp256k1/src/basic-config.h +++ b/src/secp256k1/src/basic-config.h @@ -1,35 +1,34 @@ /********************************************************************** * Copyright (c) 2013, 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_BASIC_CONFIG_H #define SECP256K1_BASIC_CONFIG_H #ifdef USE_BASIC_CONFIG #undef USE_ASM_X86_64 #undef USE_ECMULT_STATIC_PRECOMPUTATION -#undef USE_ENDOMORPHISM #undef USE_EXTERNAL_ASM #undef USE_EXTERNAL_DEFAULT_CALLBACKS #undef USE_FIELD_INV_BUILTIN #undef USE_FIELD_INV_NUM #undef USE_NUM_GMP #undef USE_NUM_NONE #undef USE_SCALAR_INV_BUILTIN #undef USE_SCALAR_INV_NUM #undef USE_FORCE_WIDEMUL_INT64 #undef USE_FORCE_WIDEMUL_INT128 #undef ECMULT_WINDOW_SIZE #define USE_NUM_NONE 1 #define USE_FIELD_INV_BUILTIN 1 #define USE_SCALAR_INV_BUILTIN 1 #define USE_WIDEMUL_64 1 #define ECMULT_WINDOW_SIZE 15 #endif /* USE_BASIC_CONFIG */ #endif /* SECP256K1_BASIC_CONFIG_H */ diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c index 9687fe448..5f2b7a975 100644 --- a/src/secp256k1/src/bench_internal.c +++ b/src/secp256k1/src/bench_internal.c @@ -1,435 +1,431 @@ /********************************************************************** * Copyright (c) 2014-2015 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #include #include "include/secp256k1.h" #include "assumptions.h" #include "util.h" #include "hash_impl.h" #include "num_impl.h" #include "field_impl.h" #include "group_impl.h" #include "scalar_impl.h" #include "ecmult_const_impl.h" #include "ecmult_impl.h" #include "bench.h" #include "secp256k1.c" typedef struct { secp256k1_scalar scalar[2]; secp256k1_fe fe[4]; secp256k1_ge ge[2]; secp256k1_gej gej[2]; unsigned char data[64]; int wnaf[256]; } bench_inv; void bench_setup(void* arg) { bench_inv *data = (bench_inv*)arg; static const unsigned char init[4][32] = { /* Initializer for scalar[0], fe[0], first half of data, the X coordinate of ge[0], and the (implied affine) X coordinate of gej[0]. */ { 0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13, 0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35, 0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59, 0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83 }, /* Initializer for scalar[1], fe[1], first half of data, the X coordinate of ge[1], and the (implied affine) X coordinate of gej[1]. */ { 0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83, 0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5, 0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9, 0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3 }, /* Initializer for fe[2] and the Z coordinate of gej[0]. */ { 0x3d, 0x2d, 0xef, 0xf4, 0x25, 0x98, 0x4f, 0x5d, 0xe2, 0xca, 0x5f, 0x41, 0x3f, 0x3f, 0xce, 0x44, 0xaa, 0x2c, 0x53, 0x8a, 0xc6, 0x59, 0x1f, 0x38, 0x38, 0x23, 0xe4, 0x11, 0x27, 0xc6, 0xa0, 0xe7 }, /* Initializer for fe[3] and the Z coordinate of gej[1]. 
          */
         {
             0xbd, 0x21, 0xa5, 0xe1, 0x13, 0x50, 0x73, 0x2e,
             0x52, 0x98, 0xc8, 0x9e, 0xab, 0x00, 0xa2, 0x68,
             0x43, 0xf5, 0xd7, 0x49, 0x80, 0x72, 0xa7, 0xf3,
             0xd7, 0x60, 0xe6, 0xab, 0x90, 0x92, 0xdf, 0xc5
         }
     };

     secp256k1_scalar_set_b32(&data->scalar[0], init[0], NULL);
     secp256k1_scalar_set_b32(&data->scalar[1], init[1], NULL);
     secp256k1_fe_set_b32(&data->fe[0], init[0]);
     secp256k1_fe_set_b32(&data->fe[1], init[1]);
     secp256k1_fe_set_b32(&data->fe[2], init[2]);
     secp256k1_fe_set_b32(&data->fe[3], init[3]);
     CHECK(secp256k1_ge_set_xo_var(&data->ge[0], &data->fe[0], 0));
     CHECK(secp256k1_ge_set_xo_var(&data->ge[1], &data->fe[1], 1));
     secp256k1_gej_set_ge(&data->gej[0], &data->ge[0]);
     secp256k1_gej_rescale(&data->gej[0], &data->fe[2]);
     secp256k1_gej_set_ge(&data->gej[1], &data->ge[1]);
     secp256k1_gej_rescale(&data->gej[1], &data->fe[3]);
     memcpy(data->data, init[0], 32);
     memcpy(data->data + 32, init[1], 32);
 }

 void bench_scalar_add(void* arg, int iters) {
     int i, j = 0;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
     }
     CHECK(j <= iters);
 }

 void bench_scalar_negate(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_scalar_negate(&data->scalar[0], &data->scalar[0]);
     }
 }

 void bench_scalar_sqr(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_scalar_sqr(&data->scalar[0], &data->scalar[0]);
     }
 }

 void bench_scalar_mul(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_scalar_mul(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
     }
 }

-#ifdef USE_ENDOMORPHISM
 void bench_scalar_split(void* arg, int iters) {
     int i, j = 0;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_scalar_split_lambda(&data->scalar[0], &data->scalar[1], &data->scalar[0]);
         j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
     }
     CHECK(j <= iters);
 }
-#endif

 void bench_scalar_inverse(void* arg, int iters) {
     int i, j = 0;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_scalar_inverse(&data->scalar[0], &data->scalar[0]);
         j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
     }
     CHECK(j <= iters);
 }

 void bench_scalar_inverse_var(void* arg, int iters) {
     int i, j = 0;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_scalar_inverse_var(&data->scalar[0], &data->scalar[0]);
         j += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
     }
     CHECK(j <= iters);
 }

 void bench_field_normalize(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_fe_normalize(&data->fe[0]);
     }
 }

 void bench_field_normalize_weak(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_fe_normalize_weak(&data->fe[0]);
     }
 }

 void bench_field_mul(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_fe_mul(&data->fe[0], &data->fe[0], &data->fe[1]);
     }
 }

 void bench_field_sqr(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_fe_sqr(&data->fe[0], &data->fe[0]);
     }
 }

 void bench_field_inverse(void* arg, int iters) {
     int i;
     bench_inv *data = (bench_inv*)arg;

     for (i = 0; i < iters; i++) {
         secp256k1_fe_inv(&data->fe[0], &data->fe[0]);
         secp256k1_fe_add(&data->fe[0], &data->fe[1]);
     }
 }

bench_field_inverse_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { secp256k1_fe_inv_var(&data->fe[0], &data->fe[0]); secp256k1_fe_add(&data->fe[0], &data->fe[1]); } } void bench_field_sqrt(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; secp256k1_fe t; for (i = 0; i < iters; i++) { t = data->fe[0]; j += secp256k1_fe_sqrt(&data->fe[0], &t); secp256k1_fe_add(&data->fe[0], &data->fe[1]); } CHECK(j <= iters); } void bench_group_double_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { secp256k1_gej_double_var(&data->gej[0], &data->gej[0], NULL); } } void bench_group_add_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { secp256k1_gej_add_var(&data->gej[0], &data->gej[0], &data->gej[1], NULL); } } void bench_group_add_affine(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { secp256k1_gej_add_ge(&data->gej[0], &data->gej[0], &data->ge[1]); } } void bench_group_add_affine_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { secp256k1_gej_add_ge_var(&data->gej[0], &data->gej[0], &data->ge[1], NULL); } } void bench_group_jacobi_var(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { j += secp256k1_gej_has_quad_y_var(&data->gej[0]); /* Vary the Y and Z coordinates of the input (the X coordinate doesn't matter to secp256k1_gej_has_quad_y_var). Note that the resulting coordinates will generally not correspond to a point on the curve, but this is not a problem for the code being benchmarked here. Adding and normalizing have less overhead than EC operations (which could guarantee the point remains on the curve). */ secp256k1_fe_add(&data->gej[0].y, &data->fe[1]); secp256k1_fe_add(&data->gej[0].z, &data->fe[2]); secp256k1_fe_normalize_var(&data->gej[0].y); secp256k1_fe_normalize_var(&data->gej[0].z); } CHECK(j <= iters); } void bench_group_to_affine_var(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; ++i) { secp256k1_ge_set_gej_var(&data->ge[1], &data->gej[0]); /* Use the output affine X/Y coordinates to vary the input X/Y/Z coordinates. Similar to bench_group_jacobi_var, this approach does not result in coordinates of points on the curve. 
*/ secp256k1_fe_add(&data->gej[0].x, &data->ge[1].y); secp256k1_fe_add(&data->gej[0].y, &data->fe[2]); secp256k1_fe_add(&data->gej[0].z, &data->ge[1].x); secp256k1_fe_normalize_var(&data->gej[0].x); secp256k1_fe_normalize_var(&data->gej[0].y); secp256k1_fe_normalize_var(&data->gej[0].z); } } void bench_ecmult_wnaf(void* arg, int iters) { int i, bits = 0, overflow = 0; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { bits += secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar[0], WINDOW_A); overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); } void bench_wnaf_const(void* arg, int iters) { int i, bits = 0, overflow = 0; bench_inv *data = (bench_inv*)arg; for (i = 0; i < iters; i++) { bits += secp256k1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256); overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]); } CHECK(overflow >= 0); CHECK(bits <= 256*iters); } void bench_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; secp256k1_sha256 sha; for (i = 0; i < iters; i++) { secp256k1_sha256_initialize(&sha); secp256k1_sha256_write(&sha, data->data, 32); secp256k1_sha256_finalize(&sha, data->data); } } void bench_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; secp256k1_hmac_sha256 hmac; for (i = 0; i < iters; i++) { secp256k1_hmac_sha256_initialize(&hmac, data->data, 32); secp256k1_hmac_sha256_write(&hmac, data->data, 32); secp256k1_hmac_sha256_finalize(&hmac, data->data); } } void bench_rfc6979_hmac_sha256(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; secp256k1_rfc6979_hmac_sha256 rng; for (i = 0; i < iters; i++) { secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32); } } void bench_context_verify(void* arg, int iters) { int i; (void)arg; for (i = 0; i < iters; i++) { secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_VERIFY)); } } void bench_context_sign(void* arg, int iters) { int i; (void)arg; for (i = 0; i < iters; i++) { secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_SIGN)); } } #ifndef USE_NUM_NONE void bench_num_jacobi(void* arg, int iters) { int i, j = 0; bench_inv *data = (bench_inv*)arg; secp256k1_num nx, na, norder; secp256k1_scalar_get_num(&nx, &data->scalar[0]); secp256k1_scalar_order_get_num(&norder); secp256k1_scalar_get_num(&na, &data->scalar[1]); for (i = 0; i < iters; i++) { j += secp256k1_num_jacobi(&nx, &norder); secp256k1_num_add(&nx, &nx, &na); } CHECK(j <= iters); } #endif int main(int argc, char **argv) { bench_inv data; int iters = get_iters(20000); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10); -#ifdef USE_ENDOMORPHISM if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters); 
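Each benchmark above follows the same harness pattern: bench_setup fills deterministic inputs, the body loops iters times mutating state held in data, and any boolean results are folded into an accumulator that is CHECKed afterwards so the compiler cannot discard the loop. A minimal sketch of a new entry in that style (the bench_group_double function and its registration line are illustrative only, not part of this patch):

void bench_group_double(void* arg, int iters) {
    int i;
    bench_inv *data = (bench_inv*)arg;
    /* Repeatedly double a Jacobian point in place; the result stays live in
     * data->gej[0], so the loop cannot be optimized away. */
    for (i = 0; i < iters; i++) {
        secp256k1_gej_double(&data->gej[0], &data->gej[0]);
    }
}

/* Registered in main() the same way as the benchmarks below: */
if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double", bench_group_double, bench_setup, NULL, &data, 10, iters*10);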
-#endif if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt", bench_field_sqrt, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 1 + iters/1000); if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) 
run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 1 + iters/100); #ifndef USE_NUM_NONE if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, iters*10); #endif return 0; } diff --git a/src/secp256k1/src/ecmult.h b/src/secp256k1/src/ecmult.h index c9b198239..09e814641 100644 --- a/src/secp256k1/src/ecmult.h +++ b/src/secp256k1/src/ecmult.h @@ -1,48 +1,46 @@ /********************************************************************** * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_ECMULT_H #define SECP256K1_ECMULT_H #include "num.h" #include "group.h" #include "scalar.h" #include "scratch.h" typedef struct { /* For accelerating the computation of a*P + b*G: */ secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */ -#ifdef USE_ENDOMORPHISM secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ -#endif } secp256k1_ecmult_context; static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx); static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc); static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src); static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx); static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx); /** Double multiply: R = na*A + ng*G */ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng); typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data); /** * Multi-multiply: R = inp_g_sc * G + sum_i ni * Ai. * Chooses the right algorithm for a given number of points and scratch space * size. Resets and overwrites the given scratch space. If the points do not * fit in the scratch space the algorithm is repeatedly run with batches of * points. If no scratch space is given then a simple algorithm is used that * simply multiplies the points with the corresponding scalars and adds them up. 
* Returns: 1 on success (including when inp_g_sc is NULL and n is 0) * 0 if there is not enough scratch space for a single point or * callback returns 0 */ static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n); #endif /* SECP256K1_ECMULT_H */ diff --git a/src/secp256k1/src/ecmult_const_impl.h b/src/secp256k1/src/ecmult_const_impl.h index 55b61e493..bb9511108 100644 --- a/src/secp256k1/src/ecmult_const_impl.h +++ b/src/secp256k1/src/ecmult_const_impl.h @@ -1,274 +1,254 @@ /********************************************************************** * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_ECMULT_CONST_IMPL_H #define SECP256K1_ECMULT_CONST_IMPL_H #include "scalar.h" #include "group.h" #include "ecmult_const.h" #include "ecmult_impl.h" /* This is like `ECMULT_TABLE_GET_GE` but is constant time */ #define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \ int m = 0; \ /* Extract the sign-bit for a constant time absolute-value. */ \ int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \ int abs_n = ((n) + mask) ^ mask; \ int idx_n = abs_n >> 1; \ secp256k1_fe neg_y; \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \ VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \ /* Unconditionally set r->x = (pre)[m].x and r->y = (pre)[m].y: either it is the correct entry \ * or it will get replaced in a later iteration; this is needed to make sure `r` is initialized. */ \ (r)->x = (pre)[m].x; \ (r)->y = (pre)[m].y; \ for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \ /* This loop is used to avoid secret data in array indices. See * the comment in ecmult_gen_impl.h for rationale. */ \ secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ } \ (r)->infinity = 0; \ secp256k1_fe_negate(&neg_y, &(r)->y, 1); \ secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ } while(0) /** Convert a number to WNAF notation. * The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val. * It has the following guarantees: * - each wnaf[i] is an odd integer between -(1 << w) and (1 << w) * - each wnaf[i] is nonzero * - the number of words set is always WNAF_SIZE(w) + 1 * * Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar * Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.) * CT-RSA 2003, LNCS 2612, pp. 328-343, 2003. Springer-Verlag Berlin Heidelberg 2003 * * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 
335 */ static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) { int global_sign; int skew = 0; int word = 0; /* 1 2 3 */ int u_last; int u; int flip; int bit; secp256k1_scalar s; int not_neg_one; VERIFY_CHECK(w > 0); VERIFY_CHECK(size > 0); /* Note that we cannot handle even numbers by negating them to be odd, as is * done in other implementations, since if our scalars were specified to have * width < 256 for performance reasons, their negations would have width 256 * and we'd lose any performance benefit. Instead, we use a technique from * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even) * or 2 (for odd) to the number we are encoding, returning a skew value indicating * this, and having the caller compensate after doing the multiplication. * * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in * particular, to ensure that the outputs from the endomorphism-split fit into * 128 bits). If we negate, the parity of our number flips, inverting which of * {1, 2} we want to add to the scalar when ensuring that it's odd. Further * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and * we need to special-case it in this logic. */ flip = secp256k1_scalar_is_high(scalar); /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ bit = flip ^ !secp256k1_scalar_is_even(scalar); /* We check for negative one, since adding 2 to it will cause an overflow */ secp256k1_scalar_negate(&s, scalar); not_neg_one = !secp256k1_scalar_is_one(&s); s = *scalar; secp256k1_scalar_cadd_bit(&s, bit, not_neg_one); /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects * that we added two to it and flipped it. In fact for -1 these operations are * identical. We only flipped, but since skewing is required (in the sense that * the skew must be 1 or 2, never zero) and flipping is not, we need to change * our flags to claim that we only skewed. */ global_sign = secp256k1_scalar_cond_negate(&s, flip); global_sign *= not_neg_one * 2 - 1; skew = 1 << bit; /* 4 */ u_last = secp256k1_scalar_shr_int(&s, w); do { int even; /* 4.1 4.4 */ u = secp256k1_scalar_shr_int(&s, w); /* 4.2 */ even = ((u & 1) == 0); /* In contrast to the original algorithm, u_last is always > 0 and * therefore we do not need to check its sign. In particular, it's easy * to see that u_last is never < 0 because u is never < 0. Moreover, * u_last is never = 0 because u is never even after a loop * iteration. The same holds analogously for the initial value of * u_last (in the first loop iteration). */ VERIFY_CHECK(u_last > 0); VERIFY_CHECK((u_last & 1) == 1); u += even; u_last -= even * (1 << w); /* 4.3, adapted for global sign change */ wnaf[word++] = u_last * global_sign; u_last = u; } while (word * w < size); wnaf[word] = u * global_sign; VERIFY_CHECK(secp256k1_scalar_is_zero(&s)); VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w)); return skew; } static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) { secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_ge tmpa; secp256k1_fe Z; int skew_1; -#ifdef USE_ENDOMORPHISM secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; int skew_lam; secp256k1_scalar q_1, q_lam; -#endif int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; int i; /* build wnaf representation for q. 
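* If the scalar is full width (size > 128), it is first split via the lambda endomorphism below, so that each ~128-bit half gets its own wnaf. 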
*/ int rsize = size; -#ifdef USE_ENDOMORPHISM if (size > 128) { rsize = 128; /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar); skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); } else -#endif { skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); -#ifdef USE_ENDOMORPHISM skew_lam = 0; -#endif } /* Calculate odd multiples of a. * All multiples are brought to the same Z 'denominator', which is stored * in Z. Due to secp256k1's isomorphism we can do all operations pretending * that the Z coordinate was 1, use affine addition formulae, and correct * the Z coordinate of the result once at the end. */ secp256k1_gej_set_ge(r, a); secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_fe_normalize_weak(&pre_a[i].y); } -#ifdef USE_ENDOMORPHISM if (size > 128) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); } } -#endif /* first loop iteration (separated out so we can directly set r, rather * than having it start at infinity, get doubled several times, then have * its new value added to it) */ i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); secp256k1_gej_set_ge(r, &tmpa); -#ifdef USE_ENDOMORPHISM if (size > 128) { i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)]; VERIFY_CHECK(i != 0); ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); secp256k1_gej_add_ge(r, r, &tmpa); } -#endif /* remaining loop iterations */ for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) { int n; int j; for (j = 0; j < WINDOW_A - 1; ++j) { secp256k1_gej_double(r, r); } n = wnaf_1[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); VERIFY_CHECK(n != 0); secp256k1_gej_add_ge(r, r, &tmpa); -#ifdef USE_ENDOMORPHISM if (size > 128) { n = wnaf_lam[i]; ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); VERIFY_CHECK(n != 0); secp256k1_gej_add_ge(r, r, &tmpa); } -#endif } secp256k1_fe_mul(&r->z, &r->z, &Z); { /* Correct for wNAF skew */ secp256k1_ge correction = *a; secp256k1_ge_storage correction_1_stor; -#ifdef USE_ENDOMORPHISM secp256k1_ge_storage correction_lam_stor; -#endif secp256k1_ge_storage a2_stor; secp256k1_gej tmpj; secp256k1_gej_set_ge(&tmpj, &correction); secp256k1_gej_double_var(&tmpj, &tmpj, NULL); secp256k1_ge_set_gej(&correction, &tmpj); secp256k1_ge_to_storage(&correction_1_stor, a); -#ifdef USE_ENDOMORPHISM if (size > 128) { secp256k1_ge_to_storage(&correction_lam_stor, a); } -#endif secp256k1_ge_to_storage(&a2_stor, &correction); /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); -#ifdef USE_ENDOMORPHISM if (size > 128) { secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); } -#endif /* Apply the correction */ secp256k1_ge_from_storage(&correction, &correction_1_stor); secp256k1_ge_neg(&correction, &correction); secp256k1_gej_add_ge(r, r, &correction); -#ifdef USE_ENDOMORPHISM if (size > 128) { secp256k1_ge_from_storage(&correction, &correction_lam_stor); secp256k1_ge_neg(&correction, &correction); secp256k1_ge_mul_lambda(&correction, &correction); secp256k1_gej_add_ge(r, r, &correction); } -#endif } } #endif /* SECP256K1_ECMULT_CONST_IMPL_H */ diff --git 
a/src/secp256k1/src/ecmult_impl.h b/src/secp256k1/src/ecmult_impl.h index 866b63a32..e4e777fee 100644 --- a/src/secp256k1/src/ecmult_impl.h +++ b/src/secp256k1/src/ecmult_impl.h @@ -1,1216 +1,1083 @@ /***************************************************************************** * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php. * *****************************************************************************/ #ifndef SECP256K1_ECMULT_IMPL_H #define SECP256K1_ECMULT_IMPL_H #include <string.h> #include <stdint.h> #include "util.h" #include "group.h" #include "scalar.h" #include "ecmult.h" #if defined(EXHAUSTIVE_TEST_ORDER) /* We need to lower these values for exhaustive tests because * the tables cannot have infinities in them (this breaks the * affine-isomorphism stuff which tracks z-ratios) */ # if EXHAUSTIVE_TEST_ORDER > 128 # define WINDOW_A 5 # define WINDOW_G 8 # elif EXHAUSTIVE_TEST_ORDER > 8 # define WINDOW_A 4 # define WINDOW_G 4 # else # define WINDOW_A 2 # define WINDOW_G 2 # endif #else /* optimal for 128-bit and 256-bit exponents. */ # define WINDOW_A 5 /** Larger values for ECMULT_WINDOW_SIZE result in possibly better * performance at the cost of an exponentially larger precomputed * table. The exact table size is * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes, * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can * be larger due to platform-specific padding and alignment. - * If the endomorphism optimization is enabled (USE_ENDOMORMPHSIM) - * two tables of this size are used instead of only one. + * Two tables of this size are used (due to the endomorphism + * optimization). */ # define WINDOW_G ECMULT_WINDOW_SIZE #endif /* No one will ever need more than a window size of 24. The code might * be correct for larger values of ECMULT_WINDOW_SIZE but this is not * tested. * * The following limitations are known, and there are probably more: * If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect * because the size of the memory object that we allocate (in bytes) * will not fit in a size_t. * If WINDOW_G > 31 and int has 32 bits, then the code is incorrect * because certain expressions will overflow. */ #if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24 # error Set ECMULT_WINDOW_SIZE to an integer in range [2..24]. #endif -#ifdef USE_ENDOMORPHISM - #define WNAF_BITS 128 -#else - #define WNAF_BITS 256 -#endif +#define WNAF_BITS 128 #define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w)) #define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w) /** The number of entries a table with precomputed multiples needs to have. */ #define ECMULT_TABLE_SIZE(w) (1 << ((w)-2)) /* The number of objects allocated on the scratch space for ecmult_multi algorithms */ #define PIPPENGER_SCRATCH_OBJECTS 6 #define STRAUSS_SCRATCH_OBJECTS 6 #define PIPPENGER_MAX_BUCKET_WINDOW 12 /* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */ -#ifdef USE_ENDOMORPHISM - #define ECMULT_PIPPENGER_THRESHOLD 88 -#else - #define ECMULT_PIPPENGER_THRESHOLD 160 -#endif +#define ECMULT_PIPPENGER_THRESHOLD 88 -#ifdef USE_ENDOMORPHISM - #define ECMULT_MAX_POINTS_PER_BATCH 5000000 -#else - #define ECMULT_MAX_POINTS_PER_BATCH 10000000 -#endif +#define ECMULT_MAX_POINTS_PER_BATCH 5000000 /** Fill a table 'prej' with precomputed odd multiples of a. 
Prej will contain * the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will * contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z. * Prej's Z values are undefined, except for the last value. */ static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) { secp256k1_gej d; secp256k1_ge a_ge, d_ge; int i; VERIFY_CHECK(!a->infinity); secp256k1_gej_double_var(&d, a, NULL); /* * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate * of 'd', and scale the 1P starting value's x/y coordinates without changing its z. */ d_ge.x = d.x; d_ge.y = d.y; d_ge.infinity = 0; secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z); prej[0].x = a_ge.x; prej[0].y = a_ge.y; prej[0].z = a->z; prej[0].infinity = 0; zr[0] = d.z; for (i = 1; i < n; i++) { secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); } /* * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only * the final point's z coordinate is actually used though, so just update that. */ secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); } /** Fill a table 'pre' with precomputed odd multiples of a. * * There are two versions of this function: * - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its * resulting point set to a single constant Z denominator, stores the X and Y * coordinates as ge_storage points in pre, and stores the global Z in globalz. * It only operates on tables sized for WINDOW_A wnaf multiples. * - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its * resulting point set to actual affine points, and stores those in pre. * It operates on tables of any size. * * To compute a*P + b*G, we compute a table for P using the first function, * and for G using the second (which requires an inverse, but it only needs to * happen once). */ static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) { secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; /* Compute the odd multiples in Jacobian form. */ secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); /* Bring them to the same Z denominator. */ secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); } static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) { secp256k1_gej d; secp256k1_ge d_ge, p_ge; secp256k1_gej pj; secp256k1_fe zi; secp256k1_fe zr; secp256k1_fe dx_over_dz_squared; int i; VERIFY_CHECK(!a->infinity); secp256k1_gej_double_var(&d, a, NULL); /* First, we perform all the additions in an isomorphic curve obtained by multiplying * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use * `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store * the resulting y-coordinate and the z-ratio, since we only have enough memory to * store two field elements. These are sufficient to efficiently undo the isomorphism * and recompute all the `x`s. 
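* (The second loop below performs exactly that reconstruction, walking the table backwards from pre[n-1].) 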
*/ d_ge.x = d.x; d_ge.y = d.y; d_ge.infinity = 0; secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z); pj.x = p_ge.x; pj.y = p_ge.y; pj.z = a->z; pj.infinity = 0; for (i = 0; i < (n - 1); i++) { secp256k1_fe_normalize_var(&pj.y); secp256k1_fe_to_storage(&pre[i].y, &pj.y); secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr); secp256k1_fe_normalize_var(&zr); secp256k1_fe_to_storage(&pre[i].x, &zr); } /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */ secp256k1_fe_mul(&zi, &pj.z, &d.z); secp256k1_fe_inv_var(&zi, &zi); /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so * that we can combine it with the saved z-ratios to compute the other zs * without any more inversions. */ secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi); secp256k1_ge_to_storage(&pre[n - 1], &p_ge); /* Compute the actual x-coordinate of D, which will be needed below. */ secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ secp256k1_fe_sqr(&dx_over_dz_squared, &d.z); secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); /* Going into the second loop, we have set `pre[n-1]` to its final affine * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We * have `zi = (p.z * d.z)^-1`, where * * `p.z` is the z-coordinate of the point on the isomorphic curve * which was ultimately assigned to `pre[n-1]`. * `d.z` is the multiplier that must be applied to all z-coordinates * to move from our isomorphic curve back to secp256k1; so the * product `p.z * d.z` is the z-coordinate of the secp256k1 * point assigned to `pre[n-1]`. * * All subsequent inverse-z-coordinates can be obtained by multiplying this * factor by successive z-ratios, which is much more efficient than directly * computing each one. * * Importantly, these inverse-zs will be coordinates of points on secp256k1, * while our other stored values come from computations on the isomorphic * curve. So in the below loop, we will take care not to actually use `zi` * or any derived values until we're back on secp256k1. */ i = n - 1; while (i > 0) { secp256k1_fe zi2, zi3; const secp256k1_fe *rzr; i--; secp256k1_ge_from_storage(&p_ge, &pre[i]); /* For each remaining point, we extract the z-ratio from the stored * x-coordinate, compute its z^-1 from that, and compute the full * point from that. */ rzr = &p_ge.x; secp256k1_fe_mul(&zi, &zi, rzr); secp256k1_fe_sqr(&zi2, &zi); secp256k1_fe_mul(&zi3, &zi2, &zi); /* To compute the actual x-coordinate, we use the stored z ratio and * y-coordinate, which we obtained from `secp256k1_gej_add_ge_var` * in the loop above, as well as the inverse of the square of its * z-coordinate. We store the latter in the `zi2` variable, which is * computed iteratively starting from the overall Z inverse then * multiplying by each z-ratio in turn. * * Denoting the z-ratio as `rzr`, we observe that it is equal to `h` * from the inside of the above `gej_add_ge_var` call. This satisfies * * rzr = d_x * z^2 - x * d_z^2 * * where (`d_x`, `d_z`) are Jacobian coordinates of `D` and `(x, z)` * are Jacobian coordinates of our desired point -- except both are on * the isomorphic curve that we were using when we called `gej_add_ge_var`. * To get back to secp256k1, we must multiply both `z`s by `d_z`, or * equivalently divide both `x`s by `d_z^2`. Our equation then becomes * * rzr = d_x * z^2 / d_z^2 - x * * (The left-hand-side, being a ratio of z-coordinates, is unaffected * by the isomorphism.) 
* * Rearranging to solve for `x`, we have * * x = d_x * z^2 / d_z^2 - rzr * * But what we actually want is the affine coordinate `X = x/z^2`, * which will satisfy * * X = d_x / d_z^2 - rzr / z^2 * = dx_over_dz_squared - rzr * zi2 */ secp256k1_fe_mul(&p_ge.x, rzr, &zi2); secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1); secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared); /* y is stored_y/z^3, as we expect */ secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3); /* Store */ secp256k1_ge_to_storage(&pre[i], &p_ge); } } /** The following two macros retrieve a particular odd multiple from a table * of precomputed multiples. */ #define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ if ((n) > 0) { \ *(r) = (pre)[((n)-1)/2]; \ } else { \ *(r) = (pre)[(-(n)-1)/2]; \ secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) #define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \ VERIFY_CHECK(((n) & 1) == 1); \ VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ if ((n) > 0) { \ secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ } else { \ secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) -#ifdef USE_ENDOMORPHISM + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) -#endif ; static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { ctx->pre_g = NULL; -#ifdef USE_ENDOMORPHISM ctx->pre_g_128 = NULL; -#endif } static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) { secp256k1_gej gj; void* const base = *prealloc; size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; if (ctx->pre_g != NULL) { return; } /* get the generator */ secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); { size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); /* check for overflow */ VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); } /* precompute the tables with odd multiples */ secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); -#ifdef USE_ENDOMORPHISM { secp256k1_gej g_128j; int i; size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G)); /* check for overflow */ VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); /* calculate 2^128*generator */ g_128j = gj; for (i = 0; i < 128; i++) { secp256k1_gej_double_var(&g_128j, &g_128j, NULL); } secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); } -#endif } static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) { if (src->pre_g != NULL) { /* We cast to void* first to suppress a -Wcast-align warning. 
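* The copied context lives at a different address than the source, so each * table pointer is rebuilt as dst plus the table's byte offset within src. 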
*/ dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); } -#ifdef USE_ENDOMORPHISM if (src->pre_g_128 != NULL) { dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); } -#endif } static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) { return ctx->pre_g != NULL; } static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) { secp256k1_ecmult_context_init(ctx); } /** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits), * with the following guarantees: * - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1) * - two non-zero entries in wnaf are separated by at least w-1 zeroes. * - the number of set values in wnaf is returned. This number is at most 256, and at most one more * than the number of bits in the (absolute value) of the input. */ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) { secp256k1_scalar s; int last_set_bit = -1; int bit = 0; int sign = 1; int carry = 0; VERIFY_CHECK(wnaf != NULL); VERIFY_CHECK(0 <= len && len <= 256); VERIFY_CHECK(a != NULL); VERIFY_CHECK(2 <= w && w <= 31); memset(wnaf, 0, len * sizeof(wnaf[0])); s = *a; if (secp256k1_scalar_get_bits(&s, 255, 1)) { secp256k1_scalar_negate(&s, &s); sign = -1; } while (bit < len) { int now; int word; if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { bit++; continue; } now = w; if (now > len - bit) { now = len - bit; } word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry; carry = (word >> (w-1)) & 1; word -= carry << w; wnaf[bit] = sign * word; last_set_bit = bit; bit += now; } #ifdef VERIFY CHECK(carry == 0); while (bit < 256) { CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0); } #endif return last_set_bit + 1; } struct secp256k1_strauss_point_state { -#ifdef USE_ENDOMORPHISM secp256k1_scalar na_1, na_lam; int wnaf_na_1[130]; int wnaf_na_lam[130]; int bits_na_1; int bits_na_lam; -#else - int wnaf_na[256]; - int bits_na; -#endif size_t input_pos; }; struct secp256k1_strauss_state { secp256k1_gej* prej; secp256k1_fe* zr; secp256k1_ge* pre_a; -#ifdef USE_ENDOMORPHISM secp256k1_ge* pre_a_lam; -#endif struct secp256k1_strauss_point_state* ps; }; static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { secp256k1_ge tmpa; secp256k1_fe Z; -#ifdef USE_ENDOMORPHISM /* Split G factors. */ secp256k1_scalar ng_1, ng_128; int wnaf_ng_1[129]; int bits_ng_1 = 0; int wnaf_ng_128[129]; int bits_ng_128 = 0; -#else - int wnaf_ng[256]; - int bits_ng = 0; -#endif int i; int bits = 0; int np; int no = 0; for (np = 0; np < num; ++np) { if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) { continue; } state->ps[no].input_pos = np; -#ifdef USE_ENDOMORPHISM /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]); /* build wnaf representation for na_1 and na_lam. 
*/ state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A); state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A); VERIFY_CHECK(state->ps[no].bits_na_1 <= 130); VERIFY_CHECK(state->ps[no].bits_na_lam <= 130); if (state->ps[no].bits_na_1 > bits) { bits = state->ps[no].bits_na_1; } if (state->ps[no].bits_na_lam > bits) { bits = state->ps[no].bits_na_lam; } -#else - /* build wnaf representation for na. */ - state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A); - if (state->ps[no].bits_na > bits) { - bits = state->ps[no].bits_na; - } -#endif ++no; } /* Calculate odd multiples of a. * All multiples are brought to the same Z 'denominator', which is stored * in Z. Due to secp256k1's isomorphism we can do all operations pretending * that the Z coordinate was 1, use affine addition formulae, and correct * the Z coordinate of the result once at the end. * The exception is the precomputed G table points, which are actually * affine. Compared to the base used for other points, they have a Z ratio * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same * isomorphism to efficiently add with a known Z inverse. */ if (no > 0) { /* Compute the odd multiples in Jacobian form. */ secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]); for (np = 1; np < no; ++np) { secp256k1_gej tmp = a[state->ps[np].input_pos]; #ifdef VERIFY secp256k1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); #endif secp256k1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z)); secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp); secp256k1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z)); } /* Bring them to the same Z denominator. 
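* Z receives the common denominator, which is multiplied back into r->z * once at the end of this function. 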
*/ secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr); } else { secp256k1_fe_set_int(&Z, 1); } -#ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]); } } if (ng) { /* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */ secp256k1_scalar_split_128(&ng_1, &ng_128, ng); /* Build wnaf representation for ng_1 and ng_128 */ bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); if (bits_ng_1 > bits) { bits = bits_ng_1; } if (bits_ng_128 > bits) { bits = bits_ng_128; } } -#else - if (ng) { - bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); - if (bits_ng > bits) { - bits = bits_ng; - } - } -#endif secp256k1_gej_set_infinity(r); for (i = bits - 1; i >= 0; i--) { int n; secp256k1_gej_double_var(r, r, NULL); -#ifdef USE_ENDOMORPHISM for (np = 0; np < no; ++np) { if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); } if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) { ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); } } if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); } if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); } -#else - for (np = 0; np < no; ++np) { - if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) { - ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); - } - } - if (i < bits_ng && (n = wnaf_ng[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); - } -#endif } if (!r->infinity) { secp256k1_fe_mul(&r->z, &r->z, &Z); } } static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; struct secp256k1_strauss_point_state ps[1]; -#ifdef USE_ENDOMORPHISM secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; -#endif struct secp256k1_strauss_state state; state.prej = prej; state.zr = zr; state.pre_a = pre_a; -#ifdef USE_ENDOMORPHISM state.pre_a_lam = pre_a_lam; -#endif state.ps = ps; secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng); } static size_t secp256k1_strauss_scratch_size(size_t n_points) { -#ifdef USE_ENDOMORPHISM static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar); -#else - static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct 
secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar); -#endif return n_points*point_size; } static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { secp256k1_gej* points; secp256k1_scalar* scalars; struct secp256k1_strauss_state state; size_t i; const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); secp256k1_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej)); scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar)); state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej)); state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe)); -#ifdef USE_ENDOMORPHISM state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A); -#else - state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); -#endif state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state)); if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) { secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } for (i = 0; i < n_points; i++) { secp256k1_ge point; if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) { secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } secp256k1_gej_set_ge(&points[i], &point); } secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc); secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } /* Wrapper for secp256k1_ecmult_multi_func interface */ static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); } static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) { return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1); } /** Convert a number to WNAF notation. * The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val. 
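* (The returned skew is compensated by the caller after the multiplication; * see the skew handling at i == 0 in secp256k1_ecmult_pippenger_wnaf below.) 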
* It has the following guarantees: * - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w) * - the number of words set is always WNAF_SIZE(w) * - the returned skew is 0 or 1 */ static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) { int skew = 0; int pos; int max_pos; int last_w; const secp256k1_scalar *work = s; if (secp256k1_scalar_is_zero(s)) { for (pos = 0; pos < WNAF_SIZE(w); pos++) { wnaf[pos] = 0; } return 0; } if (secp256k1_scalar_is_even(s)) { skew = 1; } wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew; /* Compute last window size. Relevant when window size doesn't divide the * number of bits in the scalar */ last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w; /* Store the position of the first nonzero word in max_pos to allow * skipping leading zeros when calculating the wnaf. */ for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) { int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if(val != 0) { break; } wnaf[pos] = 0; } max_pos = pos; pos = 1; while (pos <= max_pos) { int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w); if ((val & 1) == 0) { wnaf[pos - 1] -= (1 << w); wnaf[pos] = (val + 1); } else { wnaf[pos] = val; } /* Set a coefficient to zero if it is 1 or -1 and the preceding digit * is strictly negative or strictly positive respectively. Only change * coefficients at previous positions because above code assumes that * wnaf[pos - 1] is odd. */ if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) { if (wnaf[pos - 1] == 1) { wnaf[pos - 2] += 1 << w; } else { wnaf[pos - 2] -= 1 << w; } wnaf[pos - 1] = 0; } ++pos; } return skew; } struct secp256k1_pippenger_point_state { int skew_na; size_t input_pos; }; struct secp256k1_pippenger_state { int *wnaf_na; struct secp256k1_pippenger_point_state* ps; }; /* * pippenger_wnaf computes the result of a multi-point multiplication as * follows: The scalars are brought into wnaf with n_wnaf elements each. Then * for every i < n_wnaf, first each point is added to a "bucket" corresponding * to the point's wnaf[i]. Second, the buckets are added together such that * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ... 
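* For example, with three buckets the final pass adds bucket[2] to r, then * bucket[2]+bucket[1]; doubling r and then adding the complete running sum * bucket[0]+bucket[1]+bucket[2] yields 1*bucket[0] + 3*bucket[1] + 5*bucket[2]. 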
*/ static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) { size_t n_wnaf = WNAF_SIZE(bucket_window+1); size_t np; size_t no = 0; int i; int j; for (np = 0; np < num; ++np) { if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) { continue; } state->ps[no].input_pos = np; state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1); no++; } secp256k1_gej_set_infinity(r); if (no == 0) { return 1; } for (i = n_wnaf - 1; i >= 0; i--) { secp256k1_gej running_sum; for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) { secp256k1_gej_set_infinity(&buckets[j]); } for (np = 0; np < no; ++np) { int n = state->wnaf_na[np*n_wnaf + i]; struct secp256k1_pippenger_point_state point_state = state->ps[np]; secp256k1_ge tmp; int idx; if (i == 0) { /* correct for wnaf skew */ int skew = point_state.skew_na; if (skew) { secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]); secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL); } } if (n > 0) { idx = (n - 1)/2; secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL); } else if (n < 0) { idx = -(n + 1)/2; secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]); secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL); } } for(j = 0; j < bucket_window; j++) { secp256k1_gej_double_var(r, r, NULL); } secp256k1_gej_set_infinity(&running_sum); /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ... * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ... * + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...) * using an intermediate running sum: * running_sum = bucket[0] + bucket[1] + bucket[2] + ... * * The doubling is done implicitly by deferring the final window doubling (of 'r'). */ for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) { secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL); secp256k1_gej_add_var(r, r, &running_sum, NULL); } secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL); secp256k1_gej_double_var(r, r, NULL); secp256k1_gej_add_var(r, r, &running_sum, NULL); } return 1; } /** * Returns optimal bucket_window (number of bits of a scalar represented by a * set of buckets) for a given number of points. */ static int secp256k1_pippenger_bucket_window(size_t n) { -#ifdef USE_ENDOMORPHISM if (n <= 1) { return 1; } else if (n <= 4) { return 2; } else if (n <= 20) { return 3; } else if (n <= 57) { return 4; } else if (n <= 136) { return 5; } else if (n <= 235) { return 6; } else if (n <= 1260) { return 7; } else if (n <= 4420) { return 9; } else if (n <= 7880) { return 10; } else if (n <= 16050) { return 11; } else { return PIPPENGER_MAX_BUCKET_WINDOW; } -#else - if (n <= 1) { - return 1; - } else if (n <= 11) { - return 2; - } else if (n <= 45) { - return 3; - } else if (n <= 100) { - return 4; - } else if (n <= 275) { - return 5; - } else if (n <= 625) { - return 6; - } else if (n <= 1850) { - return 7; - } else if (n <= 3400) { - return 8; - } else if (n <= 9630) { - return 9; - } else if (n <= 17900) { - return 10; - } else if (n <= 32800) { - return 11; - } else { - return PIPPENGER_MAX_BUCKET_WINDOW; - } -#endif } /** * Returns the maximum optimal number of points for a bucket_window. 
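* Used by secp256k1_pippenger_max_points below to cap the candidate point * count when probing each bucket_window in turn. 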
*/ static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) { switch(bucket_window) { -#ifdef USE_ENDOMORPHISM case 1: return 1; case 2: return 4; case 3: return 20; case 4: return 57; case 5: return 136; case 6: return 235; case 7: return 1260; case 8: return 1260; case 9: return 4420; case 10: return 7880; case 11: return 16050; case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX; -#else - case 1: return 1; - case 2: return 11; - case 3: return 45; - case 4: return 100; - case 5: return 275; - case 6: return 625; - case 7: return 1850; - case 8: return 3400; - case 9: return 9630; - case 10: return 17900; - case 11: return 32800; - case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX; -#endif } return 0; } -#ifdef USE_ENDOMORPHISM SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) { secp256k1_scalar tmp = *s1; secp256k1_scalar_split_lambda(s1, s2, &tmp); secp256k1_ge_mul_lambda(p2, p1); if (secp256k1_scalar_is_high(s1)) { secp256k1_scalar_negate(s1, s1); secp256k1_ge_neg(p1, p1); } if (secp256k1_scalar_is_high(s2)) { secp256k1_scalar_negate(s2, s2); secp256k1_ge_neg(p2, p2); } } -#endif /** * Returns the scratch size required for a given number of points (excluding * base point G) without considering alignment. */ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) { -#ifdef USE_ENDOMORPHISM size_t entries = 2*n_points + 2; -#else - size_t entries = n_points + 1; -#endif size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size; } static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); - /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch + /* Use 2(n+1) with the endomorphism when calculating batch * sizes. The reason for +1 is that we add the G scalar to the list of * other scalars. 
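* For example, with 3 callback points and a non-NULL inp_g_sc this gives * 2*(3+1) = 8 entries, since the endomorphism splits every (scalar, point) * pair into two half-width pairs. 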
*/ -#ifdef USE_ENDOMORPHISM size_t entries = 2*n_points + 2; -#else - size_t entries = n_points + 1; -#endif secp256k1_ge *points; secp256k1_scalar *scalars; secp256k1_gej *buckets; struct secp256k1_pippenger_state *state_space; size_t idx = 0; size_t point_idx = 0; int i, j; int bucket_window; (void)ctx; secp256k1_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } bucket_window = secp256k1_pippenger_bucket_window(n_points); points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points)); scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars)); state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space)); if (points == NULL || scalars == NULL || state_space == NULL) { secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets)); if (points == NULL || scalars == NULL || state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) { secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } if (inp_g_sc != NULL) { scalars[0] = *inp_g_sc; points[0] = secp256k1_ge_const_g; idx++; -#ifdef USE_ENDOMORPHISM secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]); idx++; -#endif } while (point_idx < n_points) { if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) { secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } idx++; -#ifdef USE_ENDOMORPHISM secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]); idx++; -#endif point_idx++; } secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx); /* Clear data */ for(i = 0; (size_t)i < idx; i++) { secp256k1_scalar_clear(&scalars[i]); state_space->ps[i].skew_na = 0; for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) { state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0; } } for(i = 0; i < 1<<bucket_window; i++) { secp256k1_gej_clear(&buckets[i]); } secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } /* Wrapper for secp256k1_ecmult_multi_func interface */ static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { return secp256k1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); } /** * Returns the maximum number of points in addition to G that can be used with * a given scratch space. The function ensures that fewer points may also be * used. */ static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) { size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS); int bucket_window; size_t res = 0; for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) { size_t n_points; size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window); size_t space_for_points; size_t space_overhead; size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); -#ifdef USE_ENDOMORPHISM entry_size = 2*entry_size; -#endif space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state); if (space_overhead > max_alloc) { break; } space_for_points = max_alloc - space_overhead; n_points = space_for_points/entry_size; n_points = n_points > max_points ? max_points : n_points; if (n_points > res) { res = n_points; } if (n_points < max_points) { /* A larger bucket_window may support even more points. But if we * would choose that then the caller couldn't safely use any number * smaller than what this function returns */ break; } } return res; } /* Computes ecmult_multi by simply multiplying and adding each point. 
Does not * require a scratch space */ static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) { size_t point_idx; secp256k1_scalar szero; secp256k1_gej tmpj; secp256k1_scalar_set_int(&szero, 0); secp256k1_gej_set_infinity(r); secp256k1_gej_set_infinity(&tmpj); /* r = inp_g_sc*G */ secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc); for (point_idx = 0; point_idx < n_points; point_idx++) { secp256k1_ge point; secp256k1_gej pointj; secp256k1_scalar scalar; if (!cb(&scalar, &point, point_idx, cbdata)) { return 0; } /* r += scalar*point */ secp256k1_gej_set_ge(&pointj, &point); secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL); secp256k1_gej_add_var(r, r, &tmpj, NULL); } return 1; } /* Compute the number of batches and the batch size given the maximum batch size and the * total number of points */ static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { if (max_n_batch_points == 0) { return 0; } if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) { max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; } if (n == 0) { *n_batches = 0; *n_batch_points = 0; return 1; } /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */ *n_batches = 1 + (n - 1) / max_n_batch_points; *n_batch_points = 1 + (n - 1) / *n_batches; return 1; } typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t); static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { size_t i; int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t); size_t n_batches; size_t n_batch_points; secp256k1_gej_set_infinity(r); if (inp_g_sc == NULL && n == 0) { return 1; } else if (n == 0) { secp256k1_scalar szero; secp256k1_scalar_set_int(&szero, 0); secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc); return 1; } if (scratch == NULL) { return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm. * As a first step check if there's enough space for Pippenger's algo (which requires less space * than Strauss' algo) and if not, use the simple algorithm. */ if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) { return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) { f = secp256k1_ecmult_pippenger_batch; } else { if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) { return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } f = secp256k1_ecmult_strauss_batch; } for(i = 0; i < n_batches; i++) { size_t nbp = n < n_batch_points ? 
n : n_batch_points; size_t offset = n_batch_points*i; secp256k1_gej tmp; if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) { return 0; } secp256k1_gej_add_var(r, r, &tmp, NULL); n -= nbp; } return 1; } #endif /* SECP256K1_ECMULT_IMPL_H */ diff --git a/src/secp256k1/src/group.h b/src/secp256k1/src/group.h index c78100cc5..36e39ecf0 100644 --- a/src/secp256k1/src/group.h +++ b/src/secp256k1/src/group.h @@ -1,153 +1,151 @@ /********************************************************************** * Copyright (c) 2013, 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_GROUP_H #define SECP256K1_GROUP_H #include "num.h" #include "field.h" /** A group element of the secp256k1 curve, in affine coordinates. */ typedef struct { secp256k1_fe x; secp256k1_fe y; int infinity; /* whether this represents the point at infinity */ } secp256k1_ge; #define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} #define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} /** A group element of the secp256k1 curve, in jacobian coordinates. */ typedef struct { secp256k1_fe x; /* actual X: x/z^2 */ secp256k1_fe y; /* actual Y: y/z^3 */ secp256k1_fe z; int infinity; /* whether this represents the point at infinity */ } secp256k1_gej; #define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} #define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} typedef struct { secp256k1_fe_storage x; secp256k1_fe_storage y; } secp256k1_ge_storage; #define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} #define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y) /** Set a group element equal to the point with given X and Y coordinates */ static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y); /** Set a group element (affine) equal to the point with the given X coordinate * and a Y coordinate that is a quadratic residue modulo p. The return value * is true iff a coordinate with the given X coordinate exists. */ static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x); /** Set a group element (affine) equal to the point with the given X coordinate, and given oddness * for Y. Return value indicates whether the result is valid. */ static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd); /** Check whether a group element is the point at infinity. */ static int secp256k1_ge_is_infinity(const secp256k1_ge *a); /** Check whether a group element is valid (i.e., on the curve). 
*/ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); /** Set a group element equal to another which is given in jacobian coordinates */ static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len); /** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to * the same global z "denominator". zr must contain the known z-ratios such * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y * coordinates of the result are stored in r, the common z coordinate is * stored in globalz. */ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr); /** Set a group element (affine) equal to the point at infinity. */ static void secp256k1_ge_set_infinity(secp256k1_ge *r); /** Set a group element (jacobian) equal to the point at infinity. */ static void secp256k1_gej_set_infinity(secp256k1_gej *r); /** Set a group element (jacobian) equal to another which is given in affine coordinates. */ static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a); /** Compare the X coordinate of a group element (jacobian). */ static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a); /** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a); /** Check whether a group element is the point at infinity. */ static int secp256k1_gej_is_infinity(const secp256k1_gej *a); /** Check whether a group element's y coordinate is a quadratic residue. */ static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a); /** Set r equal to the double of a. Constant time. */ static void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a); /** Set r equal to the double of a. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (where infinity means an implicit z = 0). */ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr); /** Set r equal to the sum of a and b. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr); /** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b); /** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time guarantee, and b is allowed to be infinity. If rzr is non-NULL this sets *rzr such that r->z == a->z * *rzr (a cannot be infinity in that case). */ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr); /** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv).
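The rzr out-parameters documented above let a caller record the ratio between successive Z coordinates while building a table of related points, and then bring the whole table to one shared denominator with secp256k1_ge_globalz_set_table_gej, with no field inversion at all. A hypothetical sketch of that call pattern (table_example is illustrative, not library code; the z-ratio contract is the one stated above):

static void table_example(const secp256k1_gej *p, secp256k1_ge out[3], secp256k1_fe *globalz) {
    secp256k1_gej t[3];
    secp256k1_fe zr[3]; /* zr[0] is ignored, per the contract above */
    t[0] = *p; /* assumed not to be infinity */
    secp256k1_gej_double_var(&t[1], &t[0], &zr[1]);     /* t[1].z == t[0].z * zr[1] */
    secp256k1_gej_add_var(&t[2], &t[1], &t[0], &zr[2]); /* t[2].z == t[1].z * zr[2] */
    secp256k1_ge_globalz_set_table_gej(3, out, globalz, t, zr);
    /* out[0..2] now share the single implicit denominator *globalz. */
}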
*/ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv); -#ifdef USE_ENDOMORPHISM /** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a); -#endif /** Clear a secp256k1_gej to prevent leaking sensitive information. */ static void secp256k1_gej_clear(secp256k1_gej *r); /** Clear a secp256k1_ge to prevent leaking sensitive information. */ static void secp256k1_ge_clear(secp256k1_ge *r); /** Convert a group element to the storage type. */ static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a); /** Convert a group element back from the storage type. */ static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/ static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag); /** Rescale a jacobian point by b which must be non-zero. Constant-time. */ static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b); /** Determine if a point (which is assumed to be on the curve) is in the correct (sub)group of the curve. * * In normal mode, the used group is secp256k1, which has cofactor=1 meaning that every point on the curve is in the * group, and this function returns always true. * * When compiling in exhaustive test mode, a slightly different curve equation is used, leading to a group with a * (very) small subgroup, and that subgroup is what is used for all cryptographic operations. In that mode, this * function checks whether a point that is on the curve is in fact also in that subgroup. */ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge); #endif /* SECP256K1_GROUP_H */ diff --git a/src/secp256k1/src/group_impl.h b/src/secp256k1/src/group_impl.h index 3d62e1021..a5fbc91a0 100644 --- a/src/secp256k1/src/group_impl.h +++ b/src/secp256k1/src/group_impl.h @@ -1,695 +1,693 @@ /********************************************************************** * Copyright (c) 2013, 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_GROUP_IMPL_H #define SECP256K1_GROUP_IMPL_H #include "num.h" #include "field.h" #include "group.h" /* These exhaustive group test orders and generators are chosen such that: * - The field size is equal to that of secp256k1, so field code is the same. * - The curve equation is of the form y^2=x^3+B for some constant B. * - The subgroup has a generator 2*P, where P.x=1. * - The subgroup has size less than 1000 to permit exhaustive testing. * - The subgroup admits an endomorphism of the form lambda*(x,y) == (beta*x,y). * * These parameters are generated using sage/gen_exhaustive_groups.sage. 
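The endomorphism required by the last property has the same shape as on the real curve. In LaTeX, for cube roots of unity $\beta$ in $\mathbb{F}_p$ and $\lambda$ in $\mathbb{Z}_n$:

\[ \lambda \cdot (x, y) = (\beta x, y), \qquad \beta^3 \equiv 1 \pmod{p}, \qquad \lambda^3 \equiv 1 \pmod{n}, \]

which is why secp256k1_ge_mul_lambda below costs a single field multiplication.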
*/ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 13 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xc3459c3d, 0x35326167, 0xcd86cce8, 0x07a2417f, 0x5b8bd567, 0xde8538ee, 0x0d507b0c, 0xd128f5bb, 0x8e467fec, 0xcd30000a, 0x6cc1184e, 0x25d382c2, 0xa2f4494e, 0x2fbe9abc, 0x8b64abac, 0xd005fb24 ); static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( 0x3d3486b2, 0x159a9ca5, 0xc75638be, 0xb23a69bc, 0x946a45ab, 0x24801247, 0xb4ed2b8e, 0x26b6a417 ); # elif EXHAUSTIVE_TEST_ORDER == 199 static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0x226e653f, 0xc8df7744, 0x9bacbf12, 0x7d1dcbf9, 0x87f05b2a, 0xe7edbd28, 0x1f564575, 0xc48dcf18, 0xa13872c2, 0xe933bb17, 0x5d9ffd5b, 0xb5b6e10c, 0x57fe3c00, 0xbaaaa15a, 0xe003ec3e, 0x9c269bae ); static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST( 0x2cca28fa, 0xfc614b80, 0x2a3db42b, 0x00ba00b1, 0xbea8d943, 0xdace9ab2, 0x9536daea, 0x0074defb ); # else # error No known generator for the specified exhaustive test group order. # endif #else /** Generator for secp256k1, value 'g' defined in * "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL, 0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL, 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL, 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL ); static const secp256k1_fe secp256k1_fe_const_b = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 7); #endif static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { secp256k1_fe zi2; secp256k1_fe zi3; secp256k1_fe_sqr(&zi2, zi); secp256k1_fe_mul(&zi3, &zi2, zi); secp256k1_fe_mul(&r->x, &a->x, &zi2); secp256k1_fe_mul(&r->y, &a->y, &zi3); r->infinity = a->infinity; } static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) { r->infinity = 0; r->x = *x; r->y = *y; } static int secp256k1_ge_is_infinity(const secp256k1_ge *a) { return a->infinity; } static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) { *r = *a; secp256k1_fe_normalize_weak(&r->y); secp256k1_fe_negate(&r->y, &r->y, 1); } static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) { secp256k1_fe z2, z3; r->infinity = a->infinity; secp256k1_fe_inv(&a->z, &a->z); secp256k1_fe_sqr(&z2, &a->z); secp256k1_fe_mul(&z3, &a->z, &z2); secp256k1_fe_mul(&a->x, &a->x, &z2); secp256k1_fe_mul(&a->y, &a->y, &z3); secp256k1_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; } static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { secp256k1_fe z2, z3; r->infinity = a->infinity; if (a->infinity) { return; } secp256k1_fe_inv_var(&a->z, &a->z); secp256k1_fe_sqr(&z2, &a->z); secp256k1_fe_mul(&z3, &a->z, &z2); secp256k1_fe_mul(&a->x, &a->x, &z2); secp256k1_fe_mul(&a->y, &a->y, &z3); secp256k1_fe_set_int(&a->z, 1); r->x = a->x; r->y = a->y; } static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) { secp256k1_fe u; size_t i; size_t last_i = SIZE_MAX; for (i = 0; i < len; i++) { if (!a[i].infinity) { /* Use destination's x coordinates as scratch space */ if (last_i == SIZE_MAX) { r[i].x = a[i].z; } else { secp256k1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); } last_i = i; } } if (last_i == SIZE_MAX) { return; } secp256k1_fe_inv_var(&u, &r[last_i].x); i = last_i; while (i > 0) { i--; if (!a[i].infinity) { secp256k1_fe_mul(&r[last_i].x, &r[i].x, &u); secp256k1_fe_mul(&u, 
&u, &a[last_i].z); last_i = i; } } VERIFY_CHECK(!a[last_i].infinity); r[last_i].x = u; for (i = 0; i < len; i++) { r[i].infinity = a[i].infinity; if (!a[i].infinity) { secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } } } static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) { size_t i = len - 1; secp256k1_fe zs; if (len > 0) { /* The z of the final point gives us the "global Z" for the table. */ r[i].x = a[i].x; r[i].y = a[i].y; /* Ensure all y values are in weak normal form for fast negation of points */ secp256k1_fe_normalize_weak(&r[i].y); *globalz = a[i].z; r[i].infinity = 0; zs = zr[i]; /* Work our way backwards, using the z-ratios to scale the x/y values. */ while (i > 0) { if (i != len - 1) { secp256k1_fe_mul(&zs, &zs, &zr[i]); } i--; secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs); } } } static void secp256k1_gej_set_infinity(secp256k1_gej *r) { r->infinity = 1; secp256k1_fe_clear(&r->x); secp256k1_fe_clear(&r->y); secp256k1_fe_clear(&r->z); } static void secp256k1_ge_set_infinity(secp256k1_ge *r) { r->infinity = 1; secp256k1_fe_clear(&r->x); secp256k1_fe_clear(&r->y); } static void secp256k1_gej_clear(secp256k1_gej *r) { r->infinity = 0; secp256k1_fe_clear(&r->x); secp256k1_fe_clear(&r->y); secp256k1_fe_clear(&r->z); } static void secp256k1_ge_clear(secp256k1_ge *r) { r->infinity = 0; secp256k1_fe_clear(&r->x); secp256k1_fe_clear(&r->y); } static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { secp256k1_fe x2, x3; r->x = *x; secp256k1_fe_sqr(&x2, x); secp256k1_fe_mul(&x3, x, &x2); r->infinity = 0; secp256k1_fe_add(&x3, &secp256k1_fe_const_b); return secp256k1_fe_sqrt(&r->y, &x3); } static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) { if (!secp256k1_ge_set_xquad(r, x)) { return 0; } secp256k1_fe_normalize_var(&r->y); if (secp256k1_fe_is_odd(&r->y) != odd) { secp256k1_fe_negate(&r->y, &r->y, 1); } return 1; } static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; secp256k1_fe_set_int(&r->z, 1); } static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) { secp256k1_fe r, r2; VERIFY_CHECK(!a->infinity); secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x); r2 = a->x; secp256k1_fe_normalize_weak(&r2); return secp256k1_fe_equal_var(&r, &r2); } static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) { r->infinity = a->infinity; r->x = a->x; r->y = a->y; r->z = a->z; secp256k1_fe_normalize_weak(&r->y); secp256k1_fe_negate(&r->y, &r->y, 1); } static int secp256k1_gej_is_infinity(const secp256k1_gej *a) { return a->infinity; } static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) { secp256k1_fe y2, x3; if (a->infinity) { return 0; } /* y^2 = x^3 + 7 */ secp256k1_fe_sqr(&y2, &a->y); secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); secp256k1_fe_add(&x3, &secp256k1_fe_const_b); secp256k1_fe_normalize_weak(&x3); return secp256k1_fe_equal_var(&y2, &x3); } static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a) { /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate. * * Note that there is an implementation described at * https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l * which trades a multiply for a square, but in practice this is actually slower, * mainly because it requires more normalizations. 
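Restating the step-by-step comments in the body below as one set of formulas, the doubling computes, in LaTeX:

\[ Z' = 2YZ, \qquad X' = 9X^4 - 8XY^2, \qquad Y' = 36X^3Y^2 - 27X^6 - 8Y^4, \]

which is the usual $a = 0$ Jacobian doubling with $M = 3X^2$ and $S = 4XY^2$: $X' = M^2 - 2S$ and $Y' = M(S - X') - 8Y^4$.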
*/ secp256k1_fe t1,t2,t3,t4; r->infinity = a->infinity; secp256k1_fe_mul(&r->z, &a->z, &a->y); secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ secp256k1_fe_sqr(&t1, &a->x); secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ secp256k1_fe_sqr(&t3, &a->y); secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ secp256k1_fe_sqr(&t4, &t3); secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ r->x = t3; secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ } static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) { /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. * * Having said this, if this function receives a point on a sextic twist, e.g. by * a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6, * since -6 does have a cube root mod p. For this point, this function will not set * the infinity flag even though the point doubles to infinity, and the result * point will be gibberish (z = 0 but infinity = 0). 
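One step worth spelling out: $y = 0$ requires $x^3 \equiv -7 \pmod{p}$, and since $p \equiv 1 \pmod{3}$, cubing is 3-to-1 on the nonzero residues, so only a third of them are cubes; $-7$ is not one of them, while $-6$ (the twist case above) is.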
*/ if (a->infinity) { r->infinity = 1; if (rzr != NULL) { secp256k1_fe_set_int(rzr, 1); } return; } if (rzr != NULL) { *rzr = a->y; secp256k1_fe_normalize_weak(rzr); secp256k1_fe_mul_int(rzr, 2); } secp256k1_gej_double(r, a); } static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) { /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); *r = *b; return; } if (b->infinity) { if (rzr != NULL) { secp256k1_fe_set_int(rzr, 1); } *r = *a; return; } r->infinity = 0; secp256k1_fe_sqr(&z22, &b->z); secp256k1_fe_sqr(&z12, &a->z); secp256k1_fe_mul(&u1, &a->x, &z22); secp256k1_fe_mul(&u2, &b->x, &z12); secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z); secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); if (secp256k1_fe_normalizes_to_zero_var(&h)) { if (secp256k1_fe_normalizes_to_zero_var(&i)) { secp256k1_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { secp256k1_fe_set_int(rzr, 0); } secp256k1_gej_set_infinity(r); } return; } secp256k1_fe_sqr(&i2, &i); secp256k1_fe_sqr(&h2, &h); secp256k1_fe_mul(&h3, &h, &h2); secp256k1_fe_mul(&h, &h, &b->z); if (rzr != NULL) { *rzr = h; } secp256k1_fe_mul(&r->z, &a->z, &h); secp256k1_fe_mul(&t, &u1, &h2); r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); secp256k1_fe_add(&r->y, &h3); } static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) { /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (a->infinity) { VERIFY_CHECK(rzr == NULL); secp256k1_gej_set_ge(r, b); return; } if (b->infinity) { if (rzr != NULL) { secp256k1_fe_set_int(rzr, 1); } *r = *a; return; } r->infinity = 0; secp256k1_fe_sqr(&z12, &a->z); u1 = a->x; secp256k1_fe_normalize_weak(&u1); secp256k1_fe_mul(&u2, &b->x, &z12); s1 = a->y; secp256k1_fe_normalize_weak(&s1); secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); if (secp256k1_fe_normalizes_to_zero_var(&h)) { if (secp256k1_fe_normalizes_to_zero_var(&i)) { secp256k1_gej_double_var(r, a, rzr); } else { if (rzr != NULL) { secp256k1_fe_set_int(rzr, 0); } secp256k1_gej_set_infinity(r); } return; } secp256k1_fe_sqr(&i2, &i); secp256k1_fe_sqr(&h2, &h); secp256k1_fe_mul(&h3, &h, &h2); if (rzr != NULL) { *rzr = h; } secp256k1_fe_mul(&r->z, &a->z, &h); secp256k1_fe_mul(&t, &u1, &h2); r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); secp256k1_fe_add(&r->y, &h3); } static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) { /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate 
*/ secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; if (b->infinity) { *r = *a; return; } if (a->infinity) { secp256k1_fe bzinv2, bzinv3; r->infinity = b->infinity; secp256k1_fe_sqr(&bzinv2, bzinv); secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv); secp256k1_fe_mul(&r->x, &b->x, &bzinv2); secp256k1_fe_mul(&r->y, &b->y, &bzinv3); secp256k1_fe_set_int(&r->z, 1); return; } r->infinity = 0; /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to * secp256k1's isomorphism we can multiply the Z coordinates on both sides * by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1). * This means that (rx,ry,rz) can be calculated as * (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz. * The variable az below holds the modified Z coordinate for a, which is used * for the computation of rx and ry, but not for rz. */ secp256k1_fe_mul(&az, &a->z, bzinv); secp256k1_fe_sqr(&z12, &az); u1 = a->x; secp256k1_fe_normalize_weak(&u1); secp256k1_fe_mul(&u2, &b->x, &z12); s1 = a->y; secp256k1_fe_normalize_weak(&s1); secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az); secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); if (secp256k1_fe_normalizes_to_zero_var(&h)) { if (secp256k1_fe_normalizes_to_zero_var(&i)) { secp256k1_gej_double_var(r, a, NULL); } else { secp256k1_gej_set_infinity(r); } return; } secp256k1_fe_sqr(&i2, &i); secp256k1_fe_sqr(&h2, &h); secp256k1_fe_mul(&h3, &h, &h2); r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h); secp256k1_fe_mul(&t, &u1, &h2); r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); secp256k1_fe_add(&r->y, &h3); } static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) { /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; secp256k1_fe m_alt, rr_alt; int infinity, degenerate; VERIFY_CHECK(!b->infinity); VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); /** In: * Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks. * In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002. * we find as solution for a unified addition/doubling formula: * lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation. * x3 = lambda^2 - (x1 + x2) * 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2). * * Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives: * U1 = X1*Z2^2, U2 = X2*Z1^2 * S1 = Y1*Z2^3, S2 = Y2*Z1^3 * Z = Z1*Z2 * T = U1+U2 * M = S1+S2 * Q = T*M^2 * R = T^2-U1*U2 * X3 = 4*(R^2-Q) * Y3 = 4*(R*(3*Q-2*R^2)-M^4) * Z3 = 2*M*Z * (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) * * This formula has the benefit of being the same for both addition * of distinct points and doubling. However, it breaks down in the * case that either point is infinity, or that y1 = -y2. We handle * these cases in the following ways: * * - If b is infinity we simply bail by means of a VERIFY_CHECK. 
* * - If a is infinity, we detect this, and at the end of the * computation replace the result (which will be meaningless, * but we compute to be constant-time) with b.x : b.y : 1. * * - If a = -b, we have y1 = -y2, which is a degenerate case. * But here the answer is infinity, so we simply set the * infinity flag of the result, overriding the computed values * without even needing to cmov. * * - If y1 = -y2 but x1 != x2, which does occur thanks to certain * properties of our curve (specifically, 1 has nontrivial cube * roots in our field, and the curve equation has no x coefficient) * then the answer is not infinity but also not given by the above * equation. In this case, we cmov in place an alternate expression * for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these * expressions for lambda are defined, they are equal, and can be * obtained from each other by multiplication by (y1 + y2)/(y1 + y2) * then substitution of x^3 + 7 for y^2 (using the curve equation). * For all pairs of nonzero points (a, b) at least one is defined, * so this covers everything. */ secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ /** If lambda = R/M = 0/0 we have a problem (except in the "trivial" * case that Z = z1z2 = 0, and this is special-cased later on). */ degenerate = secp256k1_fe_normalizes_to_zero(&m) & secp256k1_fe_normalizes_to_zero(&rr); /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. * This means either x1 == beta*x2 or beta*x1 == x2, where beta is * a nontrivial cube root of one. In either case, an alternate * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), * so we set R/M equal to this. */ rr_alt = s1; secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ secp256k1_fe_cmov(&rr_alt, &rr, !degenerate); secp256k1_fe_cmov(&m_alt, &m, !degenerate); /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0. * From here on out Ralt and Malt represent the numerator * and denominator of lambda; R and M represent the explicit * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ /* These two lines use the observation that either M == Malt or M == 0, * so M^3 * Malt is either Malt^4 (which is computed by squaring), or * zero (which is "computed" by cmov). So the cost is one squaring * versus two multiplications. 
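Spelling out the substitution mentioned above, the two expressions for lambda agree wherever both are defined; in LaTeX:

\[ \frac{y_1 - y_2}{x_1 - x_2} = \frac{y_1^2 - y_2^2}{(x_1 - x_2)(y_1 + y_2)} = \frac{x_1^3 - x_2^3}{(x_1 - x_2)(y_1 + y_2)} = \frac{x_1^2 + x_1 x_2 + x_2^2}{y_1 + y_2}, \]

using $y_i^2 = x_i^3 + 7$ in the middle step; the right-hand side is exactly $((x_1 + x_2)^2 - x_1 x_2)/(y_1 + y_2)$ from the unified formula.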
*/ secp256k1_fe_sqr(&n, &n); secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */ secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ secp256k1_fe_normalize_weak(&t); r->x = t; /* r->x = Ralt^2-Q (1) */ secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ secp256k1_fe_normalize_weak(&r->y); secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */ secp256k1_fe_cmov(&r->x, &b->x, a->infinity); secp256k1_fe_cmov(&r->y, &b->y, a->infinity); secp256k1_fe_cmov(&r->z, &fe_1, a->infinity); r->infinity = infinity; } static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) { /* Operations: 4 mul, 1 sqr */ secp256k1_fe zz; VERIFY_CHECK(!secp256k1_fe_is_zero(s)); secp256k1_fe_sqr(&zz, s); secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ secp256k1_fe_mul(&r->y, &r->y, &zz); secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */ } static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) { secp256k1_fe x, y; VERIFY_CHECK(!a->infinity); x = a->x; secp256k1_fe_normalize(&x); y = a->y; secp256k1_fe_normalize(&y); secp256k1_fe_to_storage(&r->x, &x); secp256k1_fe_to_storage(&r->y, &y); } static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) { secp256k1_fe_from_storage(&r->x, &a->x); secp256k1_fe_from_storage(&r->y, &a->y); r->infinity = 0; } static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) { secp256k1_fe_storage_cmov(&r->x, &a->x, flag); secp256k1_fe_storage_cmov(&r->y, &a->y, flag); } -#ifdef USE_ENDOMORPHISM static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { static const secp256k1_fe beta = SECP256K1_FE_CONST( 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul ); *r = *a; secp256k1_fe_mul(&r->x, &r->x, &beta); } -#endif static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { secp256k1_fe yz; if (a->infinity) { return 0; } /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as * that of a->z. Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z is */ secp256k1_fe_mul(&yz, &a->y, &a->z); return secp256k1_fe_is_quad_var(&yz); } static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) { #ifdef EXHAUSTIVE_TEST_ORDER secp256k1_gej out; int i; /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */ secp256k1_gej_set_infinity(&out); for (i = 0; i < 32; ++i) { secp256k1_gej_double_var(&out, &out, NULL); if ((((uint32_t)EXHAUSTIVE_TEST_ORDER) >> (31 - i)) & 1) { secp256k1_gej_add_ge_var(&out, &out, ge, NULL); } } return secp256k1_gej_is_infinity(&out); #else (void)ge; /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve.
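A standalone toy version of this ladder (plain integers stand in for group elements; ladder and the names here are illustrative only, not library code) makes the MSB-first bit scan easy to check:

#include <stdint.h>
#include <stdio.h>

/* Double-and-add over the integers: "double" is *2 and "add ge" is +1,
 * mirroring secp256k1_gej_double_var / secp256k1_gej_add_ge_var above. */
static uint32_t ladder(uint32_t order) {
    uint32_t acc = 0; /* plays the role of the infinity-initialized accumulator */
    int i;
    for (i = 0; i < 32; ++i) {
        acc *= 2;
        if ((order >> (31 - i)) & 1) {
            acc += 1;
        }
    }
    return acc; /* equals order: every bit is consumed exactly once, MSB first */
}

int main(void) {
    printf("%u\n", (unsigned)ladder(13)); /* prints 13, cf. EXHAUSTIVE_TEST_ORDER == 13 */
    return 0;
}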
*/ return 1; #endif } #endif /* SECP256K1_GROUP_IMPL_H */ diff --git a/src/secp256k1/src/libsecp256k1-config.h.cmake.in b/src/secp256k1/src/libsecp256k1-config.h.cmake.in index 06e3d477b..2ed4d7ead 100644 --- a/src/secp256k1/src/libsecp256k1-config.h.cmake.in +++ b/src/secp256k1/src/libsecp256k1-config.h.cmake.in @@ -1,43 +1,42 @@ /* Copyright (c) 2017 The Bitcoin developers */ #ifndef LIBSECP256K1_CONFIG_H #define LIBSECP256K1_CONFIG_H #cmakedefine HAVE___INT128 #cmakedefine USE_NUM_GMP #cmakedefine USE_FIELD_INV_NUM #cmakedefine USE_SCALAR_INV_NUM #cmakedefine USE_NUM_NONE #cmakedefine USE_FIELD_INV_BUILTIN #cmakedefine USE_SCALAR_INV_BUILTIN #cmakedefine USE_SCALAR_4X64 #cmakedefine USE_FIELD_5X52 #cmakedefine USE_SCALAR_8X32 #cmakedefine USE_FIELD_10X26 #cmakedefine USE_ASM_X86_64 #cmakedefine USE_EXTERNAL_ASM -#cmakedefine USE_ENDOMORPHISM #cmakedefine USE_EXTERNAL_DEFAULT_CALLBACKS #cmakedefine USE_ECMULT_STATIC_PRECOMPUTATION #define ECMULT_WINDOW_SIZE ${SECP256K1_ECMULT_WINDOW_SIZE} #define ECMULT_GEN_PREC_BITS ${SECP256K1_ECMULT_GEN_PRECISION} #cmakedefine ENABLE_MODULE_ECDH #cmakedefine ENABLE_MODULE_MULTISET #cmakedefine ENABLE_MODULE_RECOVERY #cmakedefine ENABLE_MODULE_SCHNORR #cmakedefine ENABLE_MODULE_EXTRAKEYS #cmakedefine ENABLE_MODULE_SCHNORRSIG #cmakedefine ENABLE_OPENSSL_TESTS #cmakedefine COVERAGE #endif /* LIBSECP256K1_CONFIG_H */ diff --git a/src/secp256k1/src/scalar.h b/src/secp256k1/src/scalar.h index ed01c7ec9..fb3fb187c 100644 --- a/src/secp256k1/src/scalar.h +++ b/src/secp256k1/src/scalar.h @@ -1,119 +1,117 @@ /********************************************************************** * Copyright (c) 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_SCALAR_H #define SECP256K1_SCALAR_H #include "num.h" #include "util.h" #if defined HAVE_CONFIG_H #include "libsecp256k1-config.h" #endif #if defined(EXHAUSTIVE_TEST_ORDER) #include "scalar_low.h" #elif defined(SECP256K1_WIDEMUL_INT128) #include "scalar_4x64.h" #elif defined(SECP256K1_WIDEMUL_INT64) #include "scalar_8x32.h" #else #error "Please select wide multiplication implementation" #endif /** Clear a scalar to prevent the leak of sensitive data. */ static void secp256k1_scalar_clear(secp256k1_scalar *r); /** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count); /** Access bits from a scalar. Not constant time. */ static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count); /** Set a scalar from a big endian byte array. The scalar will be reduced modulo group order `n`. * In: bin: pointer to a 32-byte array. * Out: r: scalar to be set. * overflow: non-zero if the scalar was bigger or equal to `n` before reduction, zero otherwise (can be NULL). */ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow); /** Set a scalar from a big endian byte array and returns 1 if it is a valid * seckey and 0 otherwise. */ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin); /** Set a scalar to an unsigned integer. */ static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v); /** Convert a scalar to a byte array. 
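A small sketch of the secp256k1_scalar_set_b32 contract described above (illustrative only; these are internal functions, so the snippet is assumed to live inside the library). The 32 bytes below are the group order n itself, matching the SECP256K1_N_3..SECP256K1_N_0 limbs later in this diff, so deserializing them must reduce to zero and report overflow:

static const unsigned char order_b32[32] = {
    0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
    0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
};
secp256k1_scalar s;
int overflow;
secp256k1_scalar_set_b32(&s, order_b32, &overflow);
/* Now overflow == 1 and secp256k1_scalar_is_zero(&s) == 1, since n mod n == 0. */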
*/ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a); /** Add two scalars together (modulo the group order). Returns whether it overflowed. */ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b); /** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag); /** Multiply two scalars (modulo the group order). */ static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b); /** Shift a scalar right by some amount strictly between 0 and 16, returning * the low bits that were shifted off */ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n); /** Compute the square of a scalar (modulo the group order). */ static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a); /** Compute the inverse of a scalar (modulo the group order). */ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a); /** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a); /** Compute the complement of a scalar (modulo the group order). */ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a); /** Check whether a scalar equals zero. */ static int secp256k1_scalar_is_zero(const secp256k1_scalar *a); /** Check whether a scalar equals one. */ static int secp256k1_scalar_is_one(const secp256k1_scalar *a); /** Check whether a scalar, considered as a nonnegative integer, is even. */ static int secp256k1_scalar_is_even(const secp256k1_scalar *a); /** Check whether a scalar is higher than the group order divided by 2. */ static int secp256k1_scalar_is_high(const secp256k1_scalar *a); /** Conditionally negate a number, in constant time. * Returns -1 if the number was negated, 1 otherwise */ static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag); #ifndef USE_NUM_NONE /** Convert a scalar to a number. */ static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a); /** Get the order of the group as a number. */ static void secp256k1_scalar_order_get_num(secp256k1_num *r); #endif /** Compare two scalars. */ static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b); -#ifdef USE_ENDOMORPHISM /** Find r1 and r2 such that r1+r2*2^128 = k. */ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k); /** Find r1 and r2 such that r1+r2*lambda = k, * where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k); -#endif /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */ static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift); /** If flag is true, set *r equal to *a; otherwise leave it. Constant-time.
Both *r and *a must be initialized.*/ static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag); #endif /* SECP256K1_SCALAR_H */ diff --git a/src/secp256k1/src/scalar_4x64_impl.h b/src/secp256k1/src/scalar_4x64_impl.h index 3d8d2a581..73cbd5e18 100644 --- a/src/secp256k1/src/scalar_4x64_impl.h +++ b/src/secp256k1/src/scalar_4x64_impl.h @@ -1,960 +1,958 @@ /********************************************************************** * Copyright (c) 2013, 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_SCALAR_REPR_IMPL_H #define SECP256K1_SCALAR_REPR_IMPL_H /* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL) #define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL) #define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL) #define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) /* Limbs of 2^256 minus the secp256k1 order. */ #define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) #define SECP256K1_N_C_1 (~SECP256K1_N_1) #define SECP256K1_N_C_2 (1) /* Limbs of half the secp256k1 order. */ #define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL) #define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL) #define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) #define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; } SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); } SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 6 == offset >> 6) { return secp256k1_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 6) + 1 < 4); return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); } } SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. 
*/ no |= (a->d[2] < SECP256K1_N_2); yes |= (a->d[2] > SECP256K1_N_2) & ~no; no |= (a->d[1] < SECP256K1_N_1); yes |= (a->d[1] > SECP256K1_N_1) & ~no; yes |= (a->d[0] >= SECP256K1_N_0) & ~no; return yes; } SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) { uint128_t t; VERIFY_CHECK(overflow <= 1); t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0; r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1; r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2; r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint64_t)r->d[3]; r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; return overflow; } static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { int overflow; uint128_t t = (uint128_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[1] + b->d[1]; r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[2] + b->d[2]; r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)a->d[3] + b->d[3]; r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; overflow = t + secp256k1_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); secp256k1_scalar_reduce(r, overflow); return overflow; } static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { uint128_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 64) == 0); VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); #endif } static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56; r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56; r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56; r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56; over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3]; bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 
32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2]; bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1]; bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; } static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0); uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 64; t += (uint128_t)(~a->d[1]) + SECP256K1_N_1; r->d[1] = t & nonzero; t >>= 64; t += (uint128_t)(~a->d[2]) + SECP256K1_N_2; r->d[2] = t & nonzero; t >>= 64; t += (uint128_t)(~a->d[3]) + SECP256K1_N_3; r->d[3] = t & nonzero; } SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; } static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[3] < SECP256K1_N_H_3); yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; return yes; } static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { /* If we are flag = 0, mask = 00...00 and this is a no-op; * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ uint64_t mask = !flag - 1; uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1; uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 64; t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); r->d[1] = t & nonzero; t >>= 64; t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); r->d[2] = t & nonzero; t >>= 64; t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); r->d[3] = t & nonzero; return 2 * (mask == 0) - 1; } /* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ #define muladd(a,b) { \ uint64_t tl, th; \ { \ uint128_t t = (uint128_t)a * b; \ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ tl = t; \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ c1 += th; /* overflow is handled on the next line */ \ c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \ } /** Add a*b to the number defined by (c0,c1). c1 must never overflow. */ #define muladd_fast(a,b) { \ uint64_t tl, th; \ { \ uint128_t t = (uint128_t)a * b; \ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ tl = t; \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ c1 += th; /* never overflows by contract (verified in the next line) */ \ VERIFY_CHECK(c1 >= th); \ } /** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. 
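A minimal standalone model of the (c0,c1,c2) accumulator these macros maintain, assuming a compiler with unsigned __int128; acc_muladd and acc_extract are illustrative names, not library code:

#include <stdint.h>

static uint64_t c0, c1, c2; /* 192-bit accumulator, least significant limb first */

/* Model of muladd(a,b): add the 128-bit product a*b into the accumulator. */
static void acc_muladd(uint64_t a, uint64_t b) {
    unsigned __int128 t = (unsigned __int128)a * b;
    uint64_t tl = (uint64_t)t, th = (uint64_t)(t >> 64);
    c0 += tl; th += (c0 < tl);  /* carry out of the low limb */
    c1 += th; c2 += (c1 < th);  /* carry out of the middle limb */
}

/* Model of extract(n): emit the low limb and shift the accumulator down. */
static uint64_t acc_extract(void) {
    uint64_t n = c0;
    c0 = c1; c1 = c2; c2 = 0;
    return n;
}

Column by column, the multiplication and reduction code below adds every partial product whose limb indices sum to the current column, then extracts one output limb.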
*/ #define muladd2(a,b) { \ uint64_t tl, th, th2, tl2; \ { \ uint128_t t = (uint128_t)a * b; \ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ tl = t; \ } \ th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \ c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \ th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ c0 += tl2; /* overflow is handled on the next line */ \ th2 += (c0 < tl2); /* second overflow is handled on the next line */ \ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ c1 += th2; /* overflow is handled on the next line */ \ c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ } /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ #define sumadd(a) { \ unsigned int over; \ c0 += (a); /* overflow is handled on the next line */ \ over = (c0 < (a)); \ c1 += over; /* overflow is handled on the next line */ \ c2 += (c1 < over); /* never overflows by contract */ \ } /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */ #define sumadd_fast(a) { \ c0 += (a); /* overflow is handled on the next line */ \ c1 += (c0 < (a)); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \ VERIFY_CHECK(c2 == 0); \ } /** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */ #define extract(n) { \ (n) = c0; \ c0 = c1; \ c1 = c2; \ c2 = 0; \ } /** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */ #define extract_fast(n) { \ (n) = c0; \ c0 = c1; \ c1 = 0; \ VERIFY_CHECK(c2 == 0); \ } static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) { #ifdef USE_ASM_X86_64 /* Reduce 512 bits into 385. */ uint64_t m0, m1, m2, m3, m4, m5, m6; uint64_t p0, p1, p2, p3, p4; uint64_t c; __asm__ __volatile__( /* Preload. 
*/ "movq 32(%%rsi), %%r11\n" "movq 40(%%rsi), %%r12\n" "movq 48(%%rsi), %%r13\n" "movq 56(%%rsi), %%r14\n" /* Initialize r8,r9,r10 */ "movq 0(%%rsi), %%r8\n" "xorq %%r9, %%r9\n" "xorq %%r10, %%r10\n" /* (r8,r9) += n0 * c0 */ "movq %8, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" /* extract m0 */ "movq %%r8, %q0\n" "xorq %%r8, %%r8\n" /* (r9,r10) += l1 */ "addq 8(%%rsi), %%r9\n" "adcq $0, %%r10\n" /* (r9,r10,r8) += n1 * c0 */ "movq %8, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += n0 * c1 */ "movq %9, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* extract m1 */ "movq %%r9, %q1\n" "xorq %%r9, %%r9\n" /* (r10,r8,r9) += l2 */ "addq 16(%%rsi), %%r10\n" "adcq $0, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += n2 * c0 */ "movq %8, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += n1 * c1 */ "movq %9, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += n0 */ "addq %%r11, %%r10\n" "adcq $0, %%r8\n" "adcq $0, %%r9\n" /* extract m2 */ "movq %%r10, %q2\n" "xorq %%r10, %%r10\n" /* (r8,r9,r10) += l3 */ "addq 24(%%rsi), %%r8\n" "adcq $0, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += n3 * c0 */ "movq %8, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += n2 * c1 */ "movq %9, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += n1 */ "addq %%r12, %%r8\n" "adcq $0, %%r9\n" "adcq $0, %%r10\n" /* extract m3 */ "movq %%r8, %q3\n" "xorq %%r8, %%r8\n" /* (r9,r10,r8) += n3 * c1 */ "movq %9, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += n2 */ "addq %%r13, %%r9\n" "adcq $0, %%r10\n" "adcq $0, %%r8\n" /* extract m4 */ "movq %%r9, %q4\n" /* (r10,r8) += n3 */ "addq %%r14, %%r10\n" "adcq $0, %%r8\n" /* extract m5 */ "movq %%r10, %q5\n" /* extract m6 */ "movq %%r8, %q6\n" : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6) : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc"); /* Reduce 385 bits into 258. 
*/ __asm__ __volatile__( /* Preload */ "movq %q9, %%r11\n" "movq %q10, %%r12\n" "movq %q11, %%r13\n" /* Initialize (r8,r9,r10) */ "movq %q5, %%r8\n" "xorq %%r9, %%r9\n" "xorq %%r10, %%r10\n" /* (r8,r9) += m4 * c0 */ "movq %12, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" /* extract p0 */ "movq %%r8, %q0\n" "xorq %%r8, %%r8\n" /* (r9,r10) += m1 */ "addq %q6, %%r9\n" "adcq $0, %%r10\n" /* (r9,r10,r8) += m5 * c0 */ "movq %12, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += m4 * c1 */ "movq %13, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* extract p1 */ "movq %%r9, %q1\n" "xorq %%r9, %%r9\n" /* (r10,r8,r9) += m2 */ "addq %q7, %%r10\n" "adcq $0, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += m6 * c0 */ "movq %12, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += m5 * c1 */ "movq %13, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += m4 */ "addq %%r11, %%r10\n" "adcq $0, %%r8\n" "adcq $0, %%r9\n" /* extract p2 */ "movq %%r10, %q2\n" /* (r8,r9) += m3 */ "addq %q8, %%r8\n" "adcq $0, %%r9\n" /* (r8,r9) += m6 * c1 */ "movq %13, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" /* (r8,r9) += m5 */ "addq %%r12, %%r8\n" "adcq $0, %%r9\n" /* extract p3 */ "movq %%r8, %q3\n" /* (r9) += m6 */ "addq %%r13, %%r9\n" /* extract p4 */ "movq %%r9, %q4\n" : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4) : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc"); /* Reduce 258 bits into 256. */ __asm__ __volatile__( /* Preload */ "movq %q5, %%r10\n" /* (rax,rdx) = p4 * c0 */ "movq %7, %%rax\n" "mulq %%r10\n" /* (rax,rdx) += p0 */ "addq %q1, %%rax\n" "adcq $0, %%rdx\n" /* extract r0 */ "movq %%rax, 0(%q6)\n" /* Move to (r8,r9) */ "movq %%rdx, %%r8\n" "xorq %%r9, %%r9\n" /* (r8,r9) += p1 */ "addq %q2, %%r8\n" "adcq $0, %%r9\n" /* (r8,r9) += p4 * c1 */ "movq %8, %%rax\n" "mulq %%r10\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" /* Extract r1 */ "movq %%r8, 8(%q6)\n" "xorq %%r8, %%r8\n" /* (r9,r8) += p4 */ "addq %%r10, %%r9\n" "adcq $0, %%r8\n" /* (r9,r8) += p2 */ "addq %q3, %%r9\n" "adcq $0, %%r8\n" /* Extract r2 */ "movq %%r9, 16(%q6)\n" "xorq %%r9, %%r9\n" /* (r8,r9) += p3 */ "addq %q4, %%r8\n" "adcq $0, %%r9\n" /* Extract r3 */ "movq %%r8, 24(%q6)\n" /* Extract c */ "movq %%r9, %q0\n" : "=g"(c) : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "cc", "memory"); #else uint128_t c; uint64_t c0, c1, c2; uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7]; uint64_t m0, m1, m2, m3, m4, m5; uint32_t m6; uint64_t p0, p1, p2, p3; uint32_t p4; /* Reduce 512 bits into 385. */ /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */ c0 = l[0]; c1 = 0; c2 = 0; muladd_fast(n0, SECP256K1_N_C_0); extract_fast(m0); sumadd_fast(l[1]); muladd(n1, SECP256K1_N_C_0); muladd(n0, SECP256K1_N_C_1); extract(m1); sumadd(l[2]); muladd(n2, SECP256K1_N_C_0); muladd(n1, SECP256K1_N_C_1); sumadd(n0); extract(m2); sumadd(l[3]); muladd(n3, SECP256K1_N_C_0); muladd(n2, SECP256K1_N_C_1); sumadd(n1); extract(m3); muladd(n3, SECP256K1_N_C_1); sumadd(n2); extract(m4); sumadd_fast(n3); extract_fast(m5); VERIFY_CHECK(c0 <= 1); m6 = c0; /* Reduce 385 bits into 258. */ /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. 
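Every stage of this cascade uses the same congruence. In LaTeX: the order satisfies $n = 2^{256} - c$ with $c = $ SECP256K1_N_C $< 2^{129}$, so

\[ \ell = \ell_{hi} \cdot 2^{256} + \ell_{lo} \equiv \ell_{lo} + \ell_{hi} \cdot c \pmod{n}. \]

Folding the high limbs back in this way shrinks the value from 512 to at most 385 bits, then 258, then 256 plus a carry, after which the final secp256k1_scalar_reduce call performs at most one conditional subtraction of $n$.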
*/ c0 = m0; c1 = 0; c2 = 0; muladd_fast(m4, SECP256K1_N_C_0); extract_fast(p0); sumadd_fast(m1); muladd(m5, SECP256K1_N_C_0); muladd(m4, SECP256K1_N_C_1); extract(p1); sumadd(m2); muladd(m6, SECP256K1_N_C_0); muladd(m5, SECP256K1_N_C_1); sumadd(m4); extract(p2); sumadd_fast(m3); muladd_fast(m6, SECP256K1_N_C_1); sumadd_fast(m5); extract_fast(p3); p4 = c0 + m6; VERIFY_CHECK(p4 <= 2); /* Reduce 258 bits into 256. */ /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */ c = p0 + (uint128_t)SECP256K1_N_C_0 * p4; r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; c += p1 + (uint128_t)SECP256K1_N_C_1 * p4; r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; c += p2 + (uint128_t)p4; r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; c += p3; r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; #endif /* Final reduction of r. */ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) { #ifdef USE_ASM_X86_64 const uint64_t *pb = b->d; __asm__ __volatile__( /* Preload */ "movq 0(%%rdi), %%r15\n" "movq 8(%%rdi), %%rbx\n" "movq 16(%%rdi), %%rcx\n" "movq 0(%%rdx), %%r11\n" "movq 8(%%rdx), %%r12\n" "movq 16(%%rdx), %%r13\n" "movq 24(%%rdx), %%r14\n" /* (rax,rdx) = a0 * b0 */ "movq %%r15, %%rax\n" "mulq %%r11\n" /* Extract l0 */ "movq %%rax, 0(%%rsi)\n" /* (r8,r9,r10) = (rdx) */ "movq %%rdx, %%r8\n" "xorq %%r9, %%r9\n" "xorq %%r10, %%r10\n" /* (r8,r9,r10) += a0 * b1 */ "movq %%r15, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += a1 * b0 */ "movq %%rbx, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* Extract l1 */ "movq %%r8, 8(%%rsi)\n" "xorq %%r8, %%r8\n" /* (r9,r10,r8) += a0 * b2 */ "movq %%r15, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += a1 * b1 */ "movq %%rbx, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += a2 * b0 */ "movq %%rcx, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* Extract l2 */ "movq %%r9, 16(%%rsi)\n" "xorq %%r9, %%r9\n" /* (r10,r8,r9) += a0 * b3 */ "movq %%r15, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* Preload a3 */ "movq 24(%%rdi), %%r15\n" /* (r10,r8,r9) += a1 * b2 */ "movq %%rbx, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += a2 * b1 */ "movq %%rcx, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += a3 * b0 */ "movq %%r15, %%rax\n" "mulq %%r11\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* Extract l3 */ "movq %%r10, 24(%%rsi)\n" "xorq %%r10, %%r10\n" /* (r8,r9,r10) += a1 * b3 */ "movq %%rbx, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += a2 * b2 */ "movq %%rcx, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += a3 * b1 */ "movq %%r15, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* Extract l4 */ "movq %%r8, 32(%%rsi)\n" "xorq %%r8, %%r8\n" /* (r9,r10,r8) += a2 * b3 */ "movq %%rcx, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += a3 * b2 */ "movq %%r15, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* Extract l5 */ "movq %%r9, 
40(%%rsi)\n" /* (r10,r8) += a3 * b3 */ "movq %%r15, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" /* Extract l6 */ "movq %%r10, 48(%%rsi)\n" /* Extract l7 */ "movq %%r8, 56(%%rsi)\n" : "+d"(pb) : "S"(l), "D"(a->d) : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory"); #else /* 160 bit accumulator. */ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); muladd(a->d[0], b->d[1]); muladd(a->d[1], b->d[0]); extract(l[1]); muladd(a->d[0], b->d[2]); muladd(a->d[1], b->d[1]); muladd(a->d[2], b->d[0]); extract(l[2]); muladd(a->d[0], b->d[3]); muladd(a->d[1], b->d[2]); muladd(a->d[2], b->d[1]); muladd(a->d[3], b->d[0]); extract(l[3]); muladd(a->d[1], b->d[3]); muladd(a->d[2], b->d[2]); muladd(a->d[3], b->d[1]); extract(l[4]); muladd(a->d[2], b->d[3]); muladd(a->d[3], b->d[2]); extract(l[5]); muladd_fast(a->d[3], b->d[3]); extract_fast(l[6]); VERIFY_CHECK(c1 == 0); l[7] = c0; #endif } static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) { #ifdef USE_ASM_X86_64 __asm__ __volatile__( /* Preload */ "movq 0(%%rdi), %%r11\n" "movq 8(%%rdi), %%r12\n" "movq 16(%%rdi), %%r13\n" "movq 24(%%rdi), %%r14\n" /* (rax,rdx) = a0 * a0 */ "movq %%r11, %%rax\n" "mulq %%r11\n" /* Extract l0 */ "movq %%rax, 0(%%rsi)\n" /* (r8,r9,r10) = (rdx,0) */ "movq %%rdx, %%r8\n" "xorq %%r9, %%r9\n" "xorq %%r10, %%r10\n" /* (r8,r9,r10) += 2 * a0 * a1 */ "movq %%r11, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* Extract l1 */ "movq %%r8, 8(%%rsi)\n" "xorq %%r8, %%r8\n" /* (r9,r10,r8) += 2 * a0 * a2 */ "movq %%r11, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* (r9,r10,r8) += a1 * a1 */ "movq %%r12, %%rax\n" "mulq %%r12\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* Extract l2 */ "movq %%r9, 16(%%rsi)\n" "xorq %%r9, %%r9\n" /* (r10,r8,r9) += 2 * a0 * a3 */ "movq %%r11, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* (r10,r8,r9) += 2 * a1 * a2 */ "movq %%r12, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" "adcq $0, %%r9\n" /* Extract l3 */ "movq %%r10, 24(%%rsi)\n" "xorq %%r10, %%r10\n" /* (r8,r9,r10) += 2 * a1 * a3 */ "movq %%r12, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* (r8,r9,r10) += a2 * a2 */ "movq %%r13, %%rax\n" "mulq %%r13\n" "addq %%rax, %%r8\n" "adcq %%rdx, %%r9\n" "adcq $0, %%r10\n" /* Extract l4 */ "movq %%r8, 32(%%rsi)\n" "xorq %%r8, %%r8\n" /* (r9,r10,r8) += 2 * a2 * a3 */ "movq %%r13, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" "addq %%rax, %%r9\n" "adcq %%rdx, %%r10\n" "adcq $0, %%r8\n" /* Extract l5 */ "movq %%r9, 40(%%rsi)\n" /* (r10,r8) += a3 * a3 */ "movq %%r14, %%rax\n" "mulq %%r14\n" "addq %%rax, %%r10\n" "adcq %%rdx, %%r8\n" /* Extract l6 */ "movq %%r10, 48(%%rsi)\n" /* Extract l7 */ "movq %%r8, 56(%%rsi)\n" : : "S"(l), "D"(a->d) : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory"); #else /* 160 bit accumulator. 
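 *
 * [Illustrative note, not part of the original source: the accumulator is
 * the little-endian integer c0 + 2^64*c1 + 2^128*c2, i.e. two 64-bit limbs
 * plus a 32-bit carry limb, hence 160 bits. extract(n) stores the low 64
 * bits into n and shifts the accumulator right by 64; the _fast variants
 * skip the c2 carry limb where the call sequence guarantees it is zero.
 * For example, starting from c0 = 0xFFFFFFFFFFFFFFFF, c1 = 0, c2 = 0,
 * sumadd_fast(1) leaves c0 == 0 and c1 == 1.]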
*/ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; /* l[0..7] = a[0..3]^2. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); muladd2(a->d[0], a->d[1]); extract(l[1]); muladd2(a->d[0], a->d[2]); muladd(a->d[1], a->d[1]); extract(l[2]); muladd2(a->d[0], a->d[3]); muladd2(a->d[1], a->d[2]); extract(l[3]); muladd2(a->d[1], a->d[3]); muladd(a->d[2], a->d[2]); extract(l[4]); muladd2(a->d[2], a->d[3]); extract(l[5]); muladd_fast(a->d[3], a->d[3]); extract_fast(l[6]); VERIFY_CHECK(c1 == 0); l[7] = c0; #endif } #undef sumadd #undef sumadd_fast #undef muladd #undef muladd_fast #undef muladd2 #undef extract #undef extract_fast static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { uint64_t l[8]; secp256k1_scalar_mul_512(l, a, b); secp256k1_scalar_reduce_512(r, l); } static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); ret = r->d[0] & ((1 << n) - 1); r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n)); r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n)); r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n)); r->d[3] = (r->d[3] >> n); return ret; } static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { uint64_t l[8]; secp256k1_scalar_sqr_512(l, a); secp256k1_scalar_reduce_512(r, l); } -#ifdef USE_ENDOMORPHISM static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; r1->d[2] = 0; r1->d[3] = 0; r2->d[0] = k->d[2]; r2->d[1] = k->d[3]; r2->d[2] = 0; r2->d[3] = 0; } -#endif SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) { uint64_t l[8]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); secp256k1_scalar_mul_512(l, a, b); shiftlimbs = shift >> 6; shiftlow = shift & 0x3F; shifthigh = 64 - shiftlow; r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[3] = shift < 320 ?
(l[3 + shiftlimbs] >> shiftlow) : 0; secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); } static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) { uint64_t mask0, mask1; VG_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = flag + ~((uint64_t)0); mask1 = ~mask0; r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1); r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1); r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1); r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); } #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/src/secp256k1/src/scalar_8x32_impl.h b/src/secp256k1/src/scalar_8x32_impl.h index 44561c8bd..6853f79ec 100644 --- a/src/secp256k1/src/scalar_8x32_impl.h +++ b/src/secp256k1/src/scalar_8x32_impl.h @@ -1,736 +1,734 @@ /********************************************************************** * Copyright (c) 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_SCALAR_REPR_IMPL_H #define SECP256K1_SCALAR_REPR_IMPL_H /* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint32_t)0xD0364141UL) #define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL) #define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL) #define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL) #define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL) #define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL) /* Limbs of 2^256 minus the secp256k1 order. */ #define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) #define SECP256K1_N_C_1 (~SECP256K1_N_1) #define SECP256K1_N_C_2 (~SECP256K1_N_2) #define SECP256K1_N_C_3 (~SECP256K1_N_3) #define SECP256K1_N_C_4 (1) /* Limbs of half the secp256k1 order. 
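 *
 * [Illustrative note, not part of the original source: these are the limbs
 * of (n-1)/2, the largest scalar not exceeding half the group order n. They
 * are used by secp256k1_scalar_is_high below, e.g. to decide whether an
 * ECDSA signature's s value should be replaced by n - s during low-S
 * normalization.]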
*/ #define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL) #define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL) #define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL) #define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL) #define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) #define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { r->d[0] = 0; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; r->d[4] = 0; r->d[5] = 0; r->d[6] = 0; r->d[7] = 0; } SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { r->d[0] = v; r->d[1] = 0; r->d[2] = 0; r->d[3] = 0; r->d[4] = 0; r->d[5] = 0; r->d[6] = 0; r->d[7] = 0; } SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); } SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { VERIFY_CHECK(count < 32); VERIFY_CHECK(offset + count <= 256); if ((offset + count - 1) >> 5 == offset >> 5) { return secp256k1_scalar_get_bits(a, offset, count); } else { VERIFY_CHECK((offset >> 5) + 1 < 8); return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); } } SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */ no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */ no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. 
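 * (Illustrative note, not in the original source: SECP256K1_N_5, like N_6
 * and N_7, is 0xFFFFFFFF, the largest uint32_t value, so a->d[5] can never
 * exceed it.)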
*/ no |= (a->d[4] < SECP256K1_N_4); yes |= (a->d[4] > SECP256K1_N_4) & ~no; no |= (a->d[3] < SECP256K1_N_3) & ~yes; yes |= (a->d[3] > SECP256K1_N_3) & ~no; no |= (a->d[2] < SECP256K1_N_2) & ~yes; yes |= (a->d[2] > SECP256K1_N_2) & ~no; no |= (a->d[1] < SECP256K1_N_1) & ~yes; yes |= (a->d[1] > SECP256K1_N_1) & ~no; yes |= (a->d[0] >= SECP256K1_N_0) & ~no; return yes; } SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) { uint64_t t; VERIFY_CHECK(overflow <= 1); t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0; r->d[0] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1; r->d[1] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2; r->d[2] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3; r->d[3] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4; r->d[4] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[5]; r->d[5] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[6]; r->d[6] = t & 0xFFFFFFFFUL; t >>= 32; t += (uint64_t)r->d[7]; r->d[7] = t & 0xFFFFFFFFUL; return overflow; } static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { int overflow; uint64_t t = (uint64_t)a->d[0] + b->d[0]; r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[1] + b->d[1]; r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[2] + b->d[2]; r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[3] + b->d[3]; r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[4] + b->d[4]; r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[5] + b->d[5]; r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[6] + b->d[6]; r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)a->d[7] + b->d[7]; r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; overflow = t + secp256k1_scalar_check_overflow(r); VERIFY_CHECK(overflow == 0 || overflow == 1); secp256k1_scalar_reduce(r, overflow); return overflow; } static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { uint64_t t; VERIFY_CHECK(bit < 256); bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F)); r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F)); r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F)); r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F)); r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F)); r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F)); r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F)); r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F)); r->d[7] = t & 0xFFFFFFFFULL; #ifdef VERIFY VERIFY_CHECK((t >> 32) == 0); VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); #endif } static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { int over; r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24; r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 
24; r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24; r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24; r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24; r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24; r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24; r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24; over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); if (overflow) { *overflow = over; } } static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7]; bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6]; bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5]; bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4]; bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3]; bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2]; bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1]; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; } SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0); uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; r->d[1] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[2]) + SECP256K1_N_2; r->d[2] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[3]) + SECP256K1_N_3; r->d[3] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[4]) + SECP256K1_N_4; r->d[4] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[5]) + SECP256K1_N_5; r->d[5] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[6]) + SECP256K1_N_6; r->d[6] = t & nonzero; t >>= 32; t += (uint64_t)(~a->d[7]) + SECP256K1_N_7; r->d[7] = t & nonzero; } SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; } static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { int yes = 0; int no = 0; no |= (a->d[7] < SECP256K1_N_H_7); yes |= (a->d[7] > SECP256K1_N_H_7) & ~no; no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */ no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */ no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. 
*/ no |= (a->d[3] < SECP256K1_N_H_3) & ~yes; yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; yes |= (a->d[2] > SECP256K1_N_H_2) & ~no; no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; return yes; } static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { /* If flag = 0, mask = 00...00 and this is a no-op; * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */ uint32_t mask = !flag - 1; uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0); uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); r->d[0] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); r->d[1] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); r->d[2] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); r->d[3] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask); r->d[4] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask); r->d[5] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask); r->d[6] = t & nonzero; t >>= 32; t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask); r->d[7] = t & nonzero; return 2 * (mask == 0) - 1; } /* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ /** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ #define muladd(a,b) { \ uint32_t tl, th; \ { \ uint64_t t = (uint64_t)a * b; \ th = t >> 32; /* at most 0xFFFFFFFE */ \ tl = t; \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFF */ \ c1 += th; /* overflow is handled on the next line */ \ c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \ } /** Add a*b to the number defined by (c0,c1). c1 must never overflow. */ #define muladd_fast(a,b) { \ uint32_t tl, th; \ { \ uint64_t t = (uint64_t)a * b; \ th = t >> 32; /* at most 0xFFFFFFFE */ \ tl = t; \ } \ c0 += tl; /* overflow is handled on the next line */ \ th += (c0 < tl); /* at most 0xFFFFFFFF */ \ c1 += th; /* never overflows by contract (verified in the next line) */ \ VERIFY_CHECK(c1 >= th); \ } /** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ #define muladd2(a,b) { \ uint32_t tl, th, th2, tl2; \ { \ uint64_t t = (uint64_t)a * b; \ th = t >> 32; /* at most 0xFFFFFFFE */ \ tl = t; \ } \ th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \ c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 31 bits of tl were 0x7FFFFFFF) */ \ th2 += (tl2 < tl); /* at most 0xFFFFFFFF */ \ c0 += tl2; /* overflow is handled on the next line */ \ th2 += (c0 < tl2); /* second overflow is handled on the next line */ \ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ c1 += th2; /* overflow is handled on the next line */ \ c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ } /** Add a to the number defined by (c0,c1,c2). c2 must never overflow.
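 *
 * [Illustrative note, not part of the original source: these macros detect
 * unsigned overflow with the standard wraparound test: after c0 += a, the
 * addition carried out of 32 bits if and only if the new c0 is smaller
 * than a. For example, with c0 == 0xFFFFFFFF and a == 2, c0 becomes 1 and
 * (c0 < a) evaluates to 1, which is then added into c1 as the carry.]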
*/ #define sumadd(a) { \ unsigned int over; \ c0 += (a); /* overflow is handled on the next line */ \ over = (c0 < (a)); \ c1 += over; /* overflow is handled on the next line */ \ c2 += (c1 < over); /* never overflows by contract */ \ } /** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */ #define sumadd_fast(a) { \ c0 += (a); /* overflow is handled on the next line */ \ c1 += (c0 < (a)); /* never overflows by contract (verified the next line) */ \ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \ VERIFY_CHECK(c2 == 0); \ } /** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. */ #define extract(n) { \ (n) = c0; \ c0 = c1; \ c1 = c2; \ c2 = 0; \ } /** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. c2 is required to be zero. */ #define extract_fast(n) { \ (n) = c0; \ c0 = c1; \ c1 = 0; \ VERIFY_CHECK(c2 == 0); \ } static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) { uint64_t c; uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8; /* 96 bit accumulator. */ uint32_t c0, c1, c2; /* Reduce 512 bits into 385. */ /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */ c0 = l[0]; c1 = 0; c2 = 0; muladd_fast(n0, SECP256K1_N_C_0); extract_fast(m0); sumadd_fast(l[1]); muladd(n1, SECP256K1_N_C_0); muladd(n0, SECP256K1_N_C_1); extract(m1); sumadd(l[2]); muladd(n2, SECP256K1_N_C_0); muladd(n1, SECP256K1_N_C_1); muladd(n0, SECP256K1_N_C_2); extract(m2); sumadd(l[3]); muladd(n3, SECP256K1_N_C_0); muladd(n2, SECP256K1_N_C_1); muladd(n1, SECP256K1_N_C_2); muladd(n0, SECP256K1_N_C_3); extract(m3); sumadd(l[4]); muladd(n4, SECP256K1_N_C_0); muladd(n3, SECP256K1_N_C_1); muladd(n2, SECP256K1_N_C_2); muladd(n1, SECP256K1_N_C_3); sumadd(n0); extract(m4); sumadd(l[5]); muladd(n5, SECP256K1_N_C_0); muladd(n4, SECP256K1_N_C_1); muladd(n3, SECP256K1_N_C_2); muladd(n2, SECP256K1_N_C_3); sumadd(n1); extract(m5); sumadd(l[6]); muladd(n6, SECP256K1_N_C_0); muladd(n5, SECP256K1_N_C_1); muladd(n4, SECP256K1_N_C_2); muladd(n3, SECP256K1_N_C_3); sumadd(n2); extract(m6); sumadd(l[7]); muladd(n7, SECP256K1_N_C_0); muladd(n6, SECP256K1_N_C_1); muladd(n5, SECP256K1_N_C_2); muladd(n4, SECP256K1_N_C_3); sumadd(n3); extract(m7); muladd(n7, SECP256K1_N_C_1); muladd(n6, SECP256K1_N_C_2); muladd(n5, SECP256K1_N_C_3); sumadd(n4); extract(m8); muladd(n7, SECP256K1_N_C_2); muladd(n6, SECP256K1_N_C_3); sumadd(n5); extract(m9); muladd(n7, SECP256K1_N_C_3); sumadd(n6); extract(m10); sumadd_fast(n7); extract_fast(m11); VERIFY_CHECK(c0 <= 1); m12 = c0; /* Reduce 385 bits into 258. */ /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. 
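 * (Illustrative note, not in the original source: m fits in 385 bits, since
 * the VERIFY_CHECK above shows m12 <= 1, so its high part m[8..12] is at
 * most 129 bits and the same folding as in the first pass shrinks the
 * result p to 258 bits.)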
*/ c0 = m0; c1 = 0; c2 = 0; muladd_fast(m8, SECP256K1_N_C_0); extract_fast(p0); sumadd_fast(m1); muladd(m9, SECP256K1_N_C_0); muladd(m8, SECP256K1_N_C_1); extract(p1); sumadd(m2); muladd(m10, SECP256K1_N_C_0); muladd(m9, SECP256K1_N_C_1); muladd(m8, SECP256K1_N_C_2); extract(p2); sumadd(m3); muladd(m11, SECP256K1_N_C_0); muladd(m10, SECP256K1_N_C_1); muladd(m9, SECP256K1_N_C_2); muladd(m8, SECP256K1_N_C_3); extract(p3); sumadd(m4); muladd(m12, SECP256K1_N_C_0); muladd(m11, SECP256K1_N_C_1); muladd(m10, SECP256K1_N_C_2); muladd(m9, SECP256K1_N_C_3); sumadd(m8); extract(p4); sumadd(m5); muladd(m12, SECP256K1_N_C_1); muladd(m11, SECP256K1_N_C_2); muladd(m10, SECP256K1_N_C_3); sumadd(m9); extract(p5); sumadd(m6); muladd(m12, SECP256K1_N_C_2); muladd(m11, SECP256K1_N_C_3); sumadd(m10); extract(p6); sumadd_fast(m7); muladd_fast(m12, SECP256K1_N_C_3); sumadd_fast(m11); extract_fast(p7); p8 = c0 + m12; VERIFY_CHECK(p8 <= 2); /* Reduce 258 bits into 256. */ /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */ c = p0 + (uint64_t)SECP256K1_N_C_0 * p8; r->d[0] = c & 0xFFFFFFFFUL; c >>= 32; c += p1 + (uint64_t)SECP256K1_N_C_1 * p8; r->d[1] = c & 0xFFFFFFFFUL; c >>= 32; c += p2 + (uint64_t)SECP256K1_N_C_2 * p8; r->d[2] = c & 0xFFFFFFFFUL; c >>= 32; c += p3 + (uint64_t)SECP256K1_N_C_3 * p8; r->d[3] = c & 0xFFFFFFFFUL; c >>= 32; c += p4 + (uint64_t)p8; r->d[4] = c & 0xFFFFFFFFUL; c >>= 32; c += p5; r->d[5] = c & 0xFFFFFFFFUL; c >>= 32; c += p6; r->d[6] = c & 0xFFFFFFFFUL; c >>= 32; c += p7; r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; /* Final reduction of r. */ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; /* l[0..15] = a[0..7] * b[0..7]. 
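 *
 * [Illustrative note, not part of the original source: this is
 * product-scanning ("Comba") multiplication: partial products are
 * accumulated column by column, so each extract(l[i]) below is preceded by
 * exactly the products a->d[j] * b->d[i-j] of weight 2^(32*i). For example,
 * l[2] collects a0*b2 + a1*b1 + a2*b0 plus the carry from the l[1] column.]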
*/ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); muladd(a->d[0], b->d[1]); muladd(a->d[1], b->d[0]); extract(l[1]); muladd(a->d[0], b->d[2]); muladd(a->d[1], b->d[1]); muladd(a->d[2], b->d[0]); extract(l[2]); muladd(a->d[0], b->d[3]); muladd(a->d[1], b->d[2]); muladd(a->d[2], b->d[1]); muladd(a->d[3], b->d[0]); extract(l[3]); muladd(a->d[0], b->d[4]); muladd(a->d[1], b->d[3]); muladd(a->d[2], b->d[2]); muladd(a->d[3], b->d[1]); muladd(a->d[4], b->d[0]); extract(l[4]); muladd(a->d[0], b->d[5]); muladd(a->d[1], b->d[4]); muladd(a->d[2], b->d[3]); muladd(a->d[3], b->d[2]); muladd(a->d[4], b->d[1]); muladd(a->d[5], b->d[0]); extract(l[5]); muladd(a->d[0], b->d[6]); muladd(a->d[1], b->d[5]); muladd(a->d[2], b->d[4]); muladd(a->d[3], b->d[3]); muladd(a->d[4], b->d[2]); muladd(a->d[5], b->d[1]); muladd(a->d[6], b->d[0]); extract(l[6]); muladd(a->d[0], b->d[7]); muladd(a->d[1], b->d[6]); muladd(a->d[2], b->d[5]); muladd(a->d[3], b->d[4]); muladd(a->d[4], b->d[3]); muladd(a->d[5], b->d[2]); muladd(a->d[6], b->d[1]); muladd(a->d[7], b->d[0]); extract(l[7]); muladd(a->d[1], b->d[7]); muladd(a->d[2], b->d[6]); muladd(a->d[3], b->d[5]); muladd(a->d[4], b->d[4]); muladd(a->d[5], b->d[3]); muladd(a->d[6], b->d[2]); muladd(a->d[7], b->d[1]); extract(l[8]); muladd(a->d[2], b->d[7]); muladd(a->d[3], b->d[6]); muladd(a->d[4], b->d[5]); muladd(a->d[5], b->d[4]); muladd(a->d[6], b->d[3]); muladd(a->d[7], b->d[2]); extract(l[9]); muladd(a->d[3], b->d[7]); muladd(a->d[4], b->d[6]); muladd(a->d[5], b->d[5]); muladd(a->d[6], b->d[4]); muladd(a->d[7], b->d[3]); extract(l[10]); muladd(a->d[4], b->d[7]); muladd(a->d[5], b->d[6]); muladd(a->d[6], b->d[5]); muladd(a->d[7], b->d[4]); extract(l[11]); muladd(a->d[5], b->d[7]); muladd(a->d[6], b->d[6]); muladd(a->d[7], b->d[5]); extract(l[12]); muladd(a->d[6], b->d[7]); muladd(a->d[7], b->d[6]); extract(l[13]); muladd_fast(a->d[7], b->d[7]); extract_fast(l[14]); VERIFY_CHECK(c1 == 0); l[15] = c0; } static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; /* l[0..15] = a[0..7]^2. 
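 *
 * [Illustrative note, not part of the original source: squaring exploits
 * the symmetry a_i*a_j == a_j*a_i: each off-diagonal product is computed
 * once and doubled by muladd2, so only 8 diagonal plus 28 doubled cross
 * products (36 limb multiplications) are needed instead of 64.]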
*/ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); muladd2(a->d[0], a->d[1]); extract(l[1]); muladd2(a->d[0], a->d[2]); muladd(a->d[1], a->d[1]); extract(l[2]); muladd2(a->d[0], a->d[3]); muladd2(a->d[1], a->d[2]); extract(l[3]); muladd2(a->d[0], a->d[4]); muladd2(a->d[1], a->d[3]); muladd(a->d[2], a->d[2]); extract(l[4]); muladd2(a->d[0], a->d[5]); muladd2(a->d[1], a->d[4]); muladd2(a->d[2], a->d[3]); extract(l[5]); muladd2(a->d[0], a->d[6]); muladd2(a->d[1], a->d[5]); muladd2(a->d[2], a->d[4]); muladd(a->d[3], a->d[3]); extract(l[6]); muladd2(a->d[0], a->d[7]); muladd2(a->d[1], a->d[6]); muladd2(a->d[2], a->d[5]); muladd2(a->d[3], a->d[4]); extract(l[7]); muladd2(a->d[1], a->d[7]); muladd2(a->d[2], a->d[6]); muladd2(a->d[3], a->d[5]); muladd(a->d[4], a->d[4]); extract(l[8]); muladd2(a->d[2], a->d[7]); muladd2(a->d[3], a->d[6]); muladd2(a->d[4], a->d[5]); extract(l[9]); muladd2(a->d[3], a->d[7]); muladd2(a->d[4], a->d[6]); muladd(a->d[5], a->d[5]); extract(l[10]); muladd2(a->d[4], a->d[7]); muladd2(a->d[5], a->d[6]); extract(l[11]); muladd2(a->d[5], a->d[7]); muladd(a->d[6], a->d[6]); extract(l[12]); muladd2(a->d[6], a->d[7]); extract(l[13]); muladd_fast(a->d[7], a->d[7]); extract_fast(l[14]); VERIFY_CHECK(c1 == 0); l[15] = c0; } #undef sumadd #undef sumadd_fast #undef muladd #undef muladd_fast #undef muladd2 #undef extract #undef extract_fast static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { uint32_t l[16]; secp256k1_scalar_mul_512(l, a, b); secp256k1_scalar_reduce_512(r, l); } static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { int ret; VERIFY_CHECK(n > 0); VERIFY_CHECK(n < 16); ret = r->d[0] & ((1 << n) - 1); r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n)); r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n)); r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n)); r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n)); r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n)); r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n)); r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n)); r->d[7] = (r->d[7] >> n); return ret; } static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { uint32_t l[16]; secp256k1_scalar_sqr_512(l, a); secp256k1_scalar_reduce_512(r, l); } -#ifdef USE_ENDOMORPHISM static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; r1->d[2] = k->d[2]; r1->d[3] = k->d[3]; r1->d[4] = 0; r1->d[5] = 0; r1->d[6] = 0; r1->d[7] = 0; r2->d[0] = k->d[4]; r2->d[1] = k->d[5]; r2->d[2] = k->d[6]; r2->d[3] = k->d[7]; r2->d[4] = 0; r2->d[5] = 0; r2->d[6] = 0; r2->d[7] = 0; } -#endif SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; } SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) { uint32_t l[16]; unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; VERIFY_CHECK(shift >= 256); secp256k1_scalar_mul_512(l, a, b); shiftlimbs = shift >> 5; shiftlow = shift & 0x1F; shifthigh = 32 - shiftlow; r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[1] = shift < 480 ? 
(l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); } static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) { uint32_t mask0, mask1; VG_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = flag + ~((uint32_t)0); mask1 = ~mask0; r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1); r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1); r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1); r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); r->d[4] = (r->d[4] & mask0) | (a->d[4] & mask1); r->d[5] = (r->d[5] & mask0) | (a->d[5] & mask1); r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1); r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1); } #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/src/secp256k1/src/scalar_impl.h b/src/secp256k1/src/scalar_impl.h index 1657dd032..b6f014ff1 100644 --- a/src/secp256k1/src/scalar_impl.h +++ b/src/secp256k1/src/scalar_impl.h @@ -1,513 +1,511 @@ /********************************************************************** * Copyright (c) 2014 Pieter Wuille * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #ifndef SECP256K1_SCALAR_IMPL_H #define SECP256K1_SCALAR_IMPL_H #ifdef VERIFY #include <string.h> #endif #include "scalar.h" #include "util.h" #if defined HAVE_CONFIG_H #include "libsecp256k1-config.h" #endif #if defined(EXHAUSTIVE_TEST_ORDER) #include "scalar_low_impl.h" #elif defined(SECP256K1_WIDEMUL_INT128) #include "scalar_4x64_impl.h" #elif defined(SECP256K1_WIDEMUL_INT64) #include "scalar_8x32_impl.h" #else #error "Please select wide multiplication implementation" #endif static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); static const secp256k1_scalar secp256k1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); #ifndef USE_NUM_NONE static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) { unsigned char c[32]; secp256k1_scalar_get_b32(c, a); secp256k1_num_set_bin(r, c, 32); } /** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */ static void secp256k1_scalar_order_get_num(secp256k1_num *r) { #if defined(EXHAUSTIVE_TEST_ORDER) static const unsigned char order[32] = { 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER }; #else static const unsigned char order[32] = { 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 }; #endif secp256k1_num_set_bin(r, order, 32); } #endif static int
secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) { int overflow; secp256k1_scalar_set_b32(r, bin, &overflow); return (!overflow) & (!secp256k1_scalar_is_zero(r)); } static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { #if defined(EXHAUSTIVE_TEST_ORDER) int i; *r = 0; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) *r = i; /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus * have a composite group order; fix it in exhaustive_tests.c). */ VERIFY_CHECK(*r != 0); } #else secp256k1_scalar *t; int i; /* First compute xN as x ^ (2^N - 1) for some values of N, * and uM as x ^ M for some values of M. */ secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126; secp256k1_scalar u2, u5, u9, u11, u13; secp256k1_scalar_sqr(&u2, x); secp256k1_scalar_mul(&x2, &u2, x); secp256k1_scalar_mul(&u5, &u2, &x2); secp256k1_scalar_mul(&x3, &u5, &u2); secp256k1_scalar_mul(&u9, &x3, &u2); secp256k1_scalar_mul(&u11, &u9, &u2); secp256k1_scalar_mul(&u13, &u11, &u2); secp256k1_scalar_sqr(&x6, &u13); secp256k1_scalar_sqr(&x6, &x6); secp256k1_scalar_mul(&x6, &x6, &u11); secp256k1_scalar_sqr(&x8, &x6); secp256k1_scalar_sqr(&x8, &x8); secp256k1_scalar_mul(&x8, &x8, &x2); secp256k1_scalar_sqr(&x14, &x8); for (i = 0; i < 5; i++) { secp256k1_scalar_sqr(&x14, &x14); } secp256k1_scalar_mul(&x14, &x14, &x6); secp256k1_scalar_sqr(&x28, &x14); for (i = 0; i < 13; i++) { secp256k1_scalar_sqr(&x28, &x28); } secp256k1_scalar_mul(&x28, &x28, &x14); secp256k1_scalar_sqr(&x56, &x28); for (i = 0; i < 27; i++) { secp256k1_scalar_sqr(&x56, &x56); } secp256k1_scalar_mul(&x56, &x56, &x28); secp256k1_scalar_sqr(&x112, &x56); for (i = 0; i < 55; i++) { secp256k1_scalar_sqr(&x112, &x112); } secp256k1_scalar_mul(&x112, &x112, &x56); secp256k1_scalar_sqr(&x126, &x112); for (i = 0; i < 13; i++) { secp256k1_scalar_sqr(&x126, &x126); } secp256k1_scalar_mul(&x126, &x126, &x14); /* Then accumulate the final result (t starts at x126). 
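 *
 * [Illustrative note, not part of the original source: this computes the
 * inverse as x^(n-2) mod n, which equals 1/x by Fermat's little theorem
 * because the group order n is prime. The xN and uM powers prepared above
 * are the windows of an addition chain for the fixed exponent n-2: each
 * run of secp256k1_scalar_sqr below shifts the exponent built so far to
 * the left, and each secp256k1_scalar_mul appends the window bits given in
 * the trailing comments (101, 111, 1011, ...).]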
*/ t = &x126; for (i = 0; i < 3; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 5; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u11); /* 1011 */ for (i = 0; i < 4; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u11); /* 1011 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 5; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 6; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 3; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 5; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 000 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u5); /* 101 */ for (i = 0; i < 10; i++) { /* 0000000 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 4; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x3); /* 111 */ for (i = 0; i < 9; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x8); /* 11111111 */ for (i = 0; i < 5; i++) { /* 0 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u11); /* 1011 */ for (i = 0; i < 4; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 5; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &x2); /* 11 */ for (i = 0; i < 6; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 10; i++) { /* 000000 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u13); /* 1101 */ for (i = 0; i < 4; i++) { secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, &u9); /* 1001 */ for (i = 0; i < 6; i++) { /* 00000 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(t, t, x); /* 1 */ for (i = 0; i < 8; i++) { /* 00 */ secp256k1_scalar_sqr(t, t); } secp256k1_scalar_mul(r, t, &x6); /* 111111 */ } SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { return !(a->d[0] & 1); } #endif static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { #if defined(USE_SCALAR_INV_BUILTIN) secp256k1_scalar_inverse(r, x); #elif defined(USE_SCALAR_INV_NUM) unsigned char b[32]; secp256k1_num n, m; secp256k1_scalar t = *x; secp256k1_scalar_get_b32(b, &t); secp256k1_num_set_bin(&n, b, 32); secp256k1_scalar_order_get_num(&m); secp256k1_num_mod_inverse(&n, &n, &m); secp256k1_num_get_bin(b, 32, &n); secp256k1_scalar_set_b32(r, b, NULL); /* Verify that the inverse was computed correctly, without GMP code. */ secp256k1_scalar_mul(&t, &t, r); CHECK(secp256k1_scalar_is_one(&t)); #else #error "Please select scalar inverse implementation" #endif } -#ifdef USE_ENDOMORPHISM /* These parameters are generated using sage/gen_exhaustive_groups.sage. 
*/
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13
# define EXHAUSTIVE_TEST_LAMBDA 9
# elif EXHAUSTIVE_TEST_ORDER == 199
# define EXHAUSTIVE_TEST_LAMBDA 92
# else
# error No known lambda for the specified exhaustive test group order.
# endif

/**
 * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
 * full case we don't bother making k1 and k2 be small, we just want them to be
 * nontrivial to get full test coverage for the exhaustive tests. We therefore
 * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
 */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
    *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
#else
/**
 * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
 * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
 *            0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
 *
 * Both lambda and beta are primitive cube roots of unity. That is lambda^3 == 1 mod n and
 * beta^3 == 1 mod p, where n is the curve order and p is the field order.
 *
 * Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
 * roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
 * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
 *
 * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
 * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
 * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
 * reduced basis {a1 + b1*l, a2 + b2*l} where
 *
 * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
 * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
 * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 *
 * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
 * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
 * and k2 have a small size.
 *
 * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
 * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
 * compute k - k2 * lambda (mod n) which is equivalent to k1 (mod n), avoiding the need for
 * the constants a1 and a2.
 *
 * g1, g2 are precomputed constants used to replace division with a rounded multiplication
 * when decomposing the scalar for an endomorphism-based point multiplication.
 *
 * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
 * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
 *
 * The derivation is described in the paper "Efficient Software Implementation of Public-Key
 * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
 * Section 4.3 (here we use a somewhat higher-precision estimate):
 * d = a1*b2 - b1*a2
 * g1 = round(2^384 * b2/d)
 * g2 = round(2^384 * (-b1)/d)
 *
 * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
 * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda).
 *
 * The function below splits k into r1 and r2, such that
 * - r1 + lambda * r2 == k (mod n)
 * - either r1 < 2^128 or -r1 mod n < 2^128
 * - either r2 < 2^128 or -r2 mod n < 2^128
 *
 * Proof.
 *
 * Let
 * - epsilon1 = 2^256 * |g1/2^384 - b2/d|
 * - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
 * - c1 = round(k*g1/2^384)
 * - c2 = round(k*g2/2^384)
 *
 * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
 *
 *   |c1 - k*b2/d|
 *   =
 *   |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
 *   <= {triangle inequality}
 *   |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
 *   =
 *   |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
 *   < {rounding in c1 and 0 <= k < 2^256}
 *   2^-1 + 2^256 * |g1/2^384 - b2/d|
 *   = {definition of epsilon1}
 *   2^-1 + epsilon1
 *
 * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
 *
 *   |c2 - k*(-b1)/d|
 *   =
 *   |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
 *   <= {triangle inequality}
 *   |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
 *   =
 *   |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
 *   < {rounding in c2 and 0 <= k < 2^256}
 *   2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
 *   = {definition of epsilon2}
 *   2^-1 + epsilon2
 *
 * Let
 * - k1 = k - c1*a1 - c2*a2
 * - k2 = - c1*b1 - c2*b2
 *
 * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
 *
 *   |k1|
 *   = {definition of k1}
 *   |k - c1*a1 - c2*a2|
 *   = {(a1*b2 - b1*a2)/n = 1}
 *   |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
 *   =
 *   |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
 *   <= {triangle inequality}
 *   a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
 *   < {Lemma 1 and Lemma 2}
 *   a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
 *   < {rounding up to an integer}
 *   (a1 + a2 + 1)/2
 *   < {rounding up to a power of 2}
 *   2^128
 *
 * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
 *
 *   |k2|
 *   = {definition of k2}
 *   |- c1*b1 - c2*b2|
 *   = {(b1*b2 - b1*b2)/n = 0}
 *   |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
 *   =
 *   |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
 *   <= {triangle inequality}
 *   (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
 *   < {Lemma 1 and Lemma 2}
 *   (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
 *   < {rounding up to an integer}
 *   (-b1 + b2)/2 + 1
 *   < {rounding up to a power of 2}
 *   2^128
 *
 * Let
 * - r2 = k2 mod n
 * - r1 = k - r2*lambda mod n.
 *
 * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
 *
 * Lemma 5: r1 == k1 mod n.
 *
 *   r1
 *   == {definition of r1 and r2}
 *   k - k2*lambda
 *   == {definition of k2}
 *   k - (- c1*b1 - c2*b2)*lambda
 *   ==
 *   k + c1*b1*lambda + c2*b2*lambda
 *   == {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
 *   k - c1*a1 - c2*a2
 *   == {definition of k1}
 *   k1
 *
 * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
 *
 * - either r1 < 2^128 or -r1 mod n < 2^128
 * - either r2 < 2^128 or -r2 mod n < 2^128.
 *
 * Q.E.D.
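 *
 * [Illustrative note, not part of the original source: the cube-root
 * property lambda^2 + lambda + 1 == 0 (mod n) used above can be
 * sanity-checked with the scalar API itself, e.g. in a test harness that
 * provides CHECK:
 *
 *     secp256k1_scalar s;
 *     secp256k1_scalar_sqr(&s, &secp256k1_const_lambda);
 *     secp256k1_scalar_add(&s, &s, &secp256k1_const_lambda);
 *     secp256k1_scalar_add(&s, &s, &secp256k1_scalar_one);
 *     CHECK(secp256k1_scalar_is_zero(&s));
 * ]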
*/ static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST( 0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL, 0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL ); #ifdef VERIFY static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) { secp256k1_scalar s; unsigned char buf1[32]; unsigned char buf2[32]; /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */ static const unsigned char k1_bound[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77 }; /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */ static const unsigned char k2_bound[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed }; secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2); secp256k1_scalar_add(&s, &s, r1); VERIFY_CHECK(secp256k1_scalar_eq(&s, k)); secp256k1_scalar_negate(&s, r1); secp256k1_scalar_get_b32(buf1, r1); secp256k1_scalar_get_b32(buf2, &s); VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0); secp256k1_scalar_negate(&s, r2); secp256k1_scalar_get_b32(buf1, r2); secp256k1_scalar_get_b32(buf2, &s); VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0); } #endif static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) { secp256k1_scalar c1, c2; static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL ); static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL ); static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST( 0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL, 0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL ); static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST( 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL, 0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL ); VERIFY_CHECK(r1 != k); VERIFY_CHECK(r2 != k); /* these _var calls are constant time since the shift amount is constant */ secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384); secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384); secp256k1_scalar_mul(&c1, &c1, &minus_b1); secp256k1_scalar_mul(&c2, &c2, &minus_b2); secp256k1_scalar_add(r2, &c1, &c2); secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda); secp256k1_scalar_negate(r1, r1); secp256k1_scalar_add(r1, r1, k); #ifdef VERIFY secp256k1_scalar_split_lambda_verify(r1, r2, k); #endif } #endif -#endif #endif /* SECP256K1_SCALAR_IMPL_H */ diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c index ffe395075..330d7434f 100644 --- a/src/secp256k1/src/tests.c +++ b/src/secp256k1/src/tests.c @@ -1,5772 +1,5753 @@ /********************************************************************** * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #if 
defined HAVE_CONFIG_H #include "libsecp256k1-config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "secp256k1.c" #include "include/secp256k1.h" #include "include/secp256k1_preallocated.h" #include "testrand_impl.h" #ifdef ENABLE_OPENSSL_TESTS #include "openssl/bn.h" #include "openssl/ec.h" #include "openssl/ecdsa.h" #include "openssl/obj_mac.h" # if OPENSSL_VERSION_NUMBER < 0x10100000L void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) {*pr = sig->r; *ps = sig->s;} # endif #endif #include "contrib/lax_der_parsing.c" #include "contrib/lax_der_privatekey_parsing.c" static int count = 64; static secp256k1_context *ctx = NULL; static void counting_illegal_callback_fn(const char* str, void* data) { /* Dummy callback function that just counts. */ int32_t *p; (void)str; p = data; (*p)++; } static void uncounting_illegal_callback_fn(const char* str, void* data) { /* Dummy callback function that just counts (backwards). */ int32_t *p; (void)str; p = data; (*p)--; } void random_field_element_test(secp256k1_fe *fe) { do { unsigned char b32[32]; secp256k1_testrand256_test(b32); if (secp256k1_fe_set_b32(fe, b32)) { break; } } while(1); } void random_field_element_magnitude(secp256k1_fe *fe) { secp256k1_fe zero; int n = secp256k1_testrand_int(9); secp256k1_fe_normalize(fe); if (n == 0) { return; } secp256k1_fe_clear(&zero); secp256k1_fe_negate(&zero, &zero, 0); secp256k1_fe_mul_int(&zero, n - 1); secp256k1_fe_add(fe, &zero); #ifdef VERIFY CHECK(fe->magnitude == n); #endif } void random_group_element_test(secp256k1_ge *ge) { secp256k1_fe fe; do { random_field_element_test(&fe); if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_testrand_bits(1))) { secp256k1_fe_normalize(&ge->y); break; } } while(1); ge->infinity = 0; } void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) { secp256k1_fe z2, z3; do { random_field_element_test(&gej->z); if (!secp256k1_fe_is_zero(&gej->z)) { break; } } while(1); secp256k1_fe_sqr(&z2, &gej->z); secp256k1_fe_mul(&z3, &z2, &gej->z); secp256k1_fe_mul(&gej->x, &ge->x, &z2); secp256k1_fe_mul(&gej->y, &ge->y, &z3); gej->infinity = ge->infinity; } void random_scalar_order_test(secp256k1_scalar *num) { do { unsigned char b32[32]; int overflow = 0; secp256k1_testrand256_test(b32); secp256k1_scalar_set_b32(num, b32, &overflow); if (overflow || secp256k1_scalar_is_zero(num)) { continue; } break; } while(1); } void random_scalar_order(secp256k1_scalar *num) { do { unsigned char b32[32]; int overflow = 0; secp256k1_testrand256(b32); secp256k1_scalar_set_b32(num, b32, &overflow); if (overflow || secp256k1_scalar_is_zero(num)) { continue; } break; } while(1); } void random_scalar_order_b32(unsigned char *b32) { secp256k1_scalar num; random_scalar_order(&num); secp256k1_scalar_get_b32(b32, &num); } void run_context_tests(int use_prealloc) { secp256k1_pubkey pubkey; secp256k1_pubkey zero_pubkey; secp256k1_ecdsa_signature sig; unsigned char ctmp[32]; int32_t ecount; int32_t ecount2; secp256k1_context *none; secp256k1_context *sign; secp256k1_context *vrfy; secp256k1_context *both; void *none_prealloc = NULL; void *sign_prealloc = NULL; void *vrfy_prealloc = NULL; void *both_prealloc = NULL; secp256k1_gej pubj; secp256k1_ge pub; secp256k1_scalar msg, key, nonce; secp256k1_scalar sigr, sigs; if (use_prealloc) { none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); vrfy_prealloc =
            malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY));
        both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY));
        CHECK(none_prealloc != NULL);
        CHECK(sign_prealloc != NULL);
        CHECK(vrfy_prealloc != NULL);
        CHECK(both_prealloc != NULL);
        none = secp256k1_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE);
        sign = secp256k1_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN);
        vrfy = secp256k1_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY);
        both = secp256k1_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    } else {
        none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
        sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
        vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
        both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    }

    memset(&zero_pubkey, 0, sizeof(zero_pubkey));

    ecount = 0;
    ecount2 = 10;
    secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
    secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2);
    /* set error callback (to a function that still aborts in case malloc() fails in secp256k1_context_clone() below) */
    secp256k1_context_set_error_callback(sign, secp256k1_default_illegal_callback_fn, NULL);
    CHECK(sign->error_callback.fn != vrfy->error_callback.fn);
    CHECK(sign->error_callback.fn == secp256k1_default_illegal_callback_fn);

    /* check if sizes for cloning are consistent */
    CHECK(secp256k1_context_preallocated_clone_size(none) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE));
    CHECK(secp256k1_context_preallocated_clone_size(sign) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN));
    CHECK(secp256k1_context_preallocated_clone_size(vrfy) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY));
    CHECK(secp256k1_context_preallocated_clone_size(both) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY));

    /*** clone and destroy all of them to make sure cloning was complete ***/
    {
        secp256k1_context *ctx_tmp;

        if (use_prealloc) {
            /* clone into a non-preallocated context and then again into a new preallocated one. */
            ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(none_prealloc); none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL);
            ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, none_prealloc); secp256k1_context_destroy(ctx_tmp);

            ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(sign_prealloc); sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL);
            ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, sign_prealloc); secp256k1_context_destroy(ctx_tmp);

            ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(vrfy_prealloc); vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL);
            ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, vrfy_prealloc); secp256k1_context_destroy(ctx_tmp);

            ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(both_prealloc); both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL);
            ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, both_prealloc); secp256k1_context_destroy(ctx_tmp);
        } else {
            /* clone into a preallocated context and then again into a new non-preallocated one. */
            void *prealloc_tmp;

            prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL);
            ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
            ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(prealloc_tmp);

            prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL);
            ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
            ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(prealloc_tmp);

            prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL);
            ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
            ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(prealloc_tmp);

            prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL);
            ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
            ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp);
            free(prealloc_tmp);
        }
    }

    /* Verify that the error callback makes it across the clone. */
    CHECK(sign->error_callback.fn != vrfy->error_callback.fn);
    CHECK(sign->error_callback.fn == secp256k1_default_illegal_callback_fn);
    /* And that it resets back to default. */
    secp256k1_context_set_error_callback(sign, NULL, NULL);
    CHECK(vrfy->error_callback.fn == sign->error_callback.fn);

    /*** attempt to use them ***/
    random_scalar_order_test(&msg);
    random_scalar_order_test(&key);
    secp256k1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key);
    secp256k1_ge_set_gej(&pub, &pubj);

    /* Verify context-type checking illegal-argument errors. */
    memset(ctmp, 1, 32);
    CHECK(secp256k1_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0);
    CHECK(ecount == 1);
    VG_UNDEF(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ec_pubkey_create(sign, &pubkey, ctmp) == 1);
    VG_CHECK(&pubkey, sizeof(pubkey));
    CHECK(secp256k1_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0);
    CHECK(ecount == 2);
    VG_UNDEF(&sig, sizeof(sig));
    CHECK(secp256k1_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1);
    VG_CHECK(&sig, sizeof(sig));
    CHECK(ecount2 == 10);
    CHECK(secp256k1_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0);
    CHECK(ecount2 == 11);
    CHECK(secp256k1_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1);
    CHECK(ecount == 2);
    CHECK(secp256k1_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0);
    CHECK(ecount2 == 12);
    CHECK(secp256k1_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1);
    CHECK(ecount == 2);
    CHECK(secp256k1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0);
    CHECK(ecount2 == 13);
    CHECK(secp256k1_ec_pubkey_negate(vrfy, &pubkey) == 1);
    CHECK(ecount == 2);
    CHECK(secp256k1_ec_pubkey_negate(sign, &pubkey) == 1);
    CHECK(ecount == 2);
    CHECK(secp256k1_ec_pubkey_negate(sign, NULL) == 0);
    CHECK(ecount2 == 14);
    CHECK(secp256k1_ec_pubkey_negate(vrfy, &zero_pubkey) == 0);
    CHECK(ecount == 3);
    CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1);
    CHECK(ecount == 3);
    CHECK(secp256k1_context_randomize(vrfy, ctmp) == 1);
    CHECK(ecount == 3);
    CHECK(secp256k1_context_randomize(vrfy, NULL) == 1);
    CHECK(ecount == 3);
    CHECK(secp256k1_context_randomize(sign, ctmp) == 1);
    CHECK(ecount2 == 14);
    CHECK(secp256k1_context_randomize(sign, NULL) == 1);
    CHECK(ecount2 == 14);
    secp256k1_context_set_illegal_callback(vrfy, NULL, NULL);
    secp256k1_context_set_illegal_callback(sign, NULL, NULL);

    /* obtain a working nonce */
    do {
        random_scalar_order_test(&nonce);
    } while(!secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));

    /* try signing */
    CHECK(secp256k1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
    CHECK(secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));

    /* try verifying */
    CHECK(secp256k1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg));
    CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg));

    /* cleanup */
    if (use_prealloc) {
        secp256k1_context_preallocated_destroy(none);
        secp256k1_context_preallocated_destroy(sign);
        secp256k1_context_preallocated_destroy(vrfy);
        secp256k1_context_preallocated_destroy(both);
        free(none_prealloc);
        free(sign_prealloc);
        free(vrfy_prealloc);
        free(both_prealloc);
    } else {
        secp256k1_context_destroy(none);
        secp256k1_context_destroy(sign);
        secp256k1_context_destroy(vrfy);
        secp256k1_context_destroy(both);
    }
    /* Defined as no-op. */
    secp256k1_context_destroy(NULL);
    secp256k1_context_preallocated_destroy(NULL);
}

void run_scratch_tests(void) {
    const size_t adj_alloc = ((500 + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;

    int32_t ecount = 0;
    size_t checkpoint;
    size_t checkpoint_2;
    secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
    secp256k1_scratch_space *scratch;
    secp256k1_scratch_space local_scratch;

    /* Test public API */
    secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
    secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);

    scratch = secp256k1_scratch_space_create(none, 1000);
    CHECK(scratch != NULL);
    CHECK(ecount == 0);

    /* Test internal API */
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000);
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1));
    CHECK(scratch->alloc_size == 0);
    CHECK(scratch->alloc_size % ALIGNMENT == 0);

    /* Allocating 500 bytes succeeds */
    checkpoint = secp256k1_scratch_checkpoint(&none->error_callback, scratch);
    CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL);
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
    CHECK(scratch->alloc_size != 0);
    CHECK(scratch->alloc_size % ALIGNMENT == 0);

    /* Allocating another 501 bytes fails */
    CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 501) == NULL);
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc);
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1));
    CHECK(scratch->alloc_size != 0);
    CHECK(scratch->alloc_size % ALIGNMENT == 0);

    /* ...but it succeeds once we apply the checkpoint to undo it */
    secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint);
    CHECK(scratch->alloc_size == 0);
    CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000);
    CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL);
    CHECK(scratch->alloc_size != 0);

    /* try to apply a bad checkpoint */
    checkpoint_2 = secp256k1_scratch_checkpoint(&none->error_callback, scratch);
    secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint);
    CHECK(ecount == 0);
    secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */
    CHECK(ecount == 1);
    secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */
    CHECK(ecount == 2);

    /* try to use badly initialized scratch space */
    secp256k1_scratch_space_destroy(none, scratch);
    memset(&local_scratch, 0, sizeof(local_scratch));
    scratch = &local_scratch;
    CHECK(!secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0));
    CHECK(ecount == 3);
    CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL);
    CHECK(ecount == 4);
    secp256k1_scratch_space_destroy(none, scratch);
    CHECK(ecount == 5);

    /* Test that large integers do not wrap around in a bad way */
    scratch = secp256k1_scratch_space_create(none, 1000);
    /* Try max allocation with a large number of objects. Only makes sense if
     * ALIGNMENT is greater than 1 because otherwise the objects take no extra
     * space.
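     * Requesting (SIZE_MAX / (ALIGNMENT - 1)) + 1 objects makes the total
     * padding term, objects * (ALIGNMENT - 1), overflow size_t, so the call
     * below must report that no allocation is possible rather than return a
     * bogus size.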
*/ CHECK(ALIGNMENT <= 1 || !secp256k1_scratch_max_allocation(&none->error_callback, scratch, (SIZE_MAX / (ALIGNMENT - 1)) + 1)); /* Try allocating SIZE_MAX to test wrap around which only happens if * ALIGNMENT > 1, otherwise it returns NULL anyway because the scratch * space is too small. */ CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, SIZE_MAX) == NULL); secp256k1_scratch_space_destroy(none, scratch); /* cleanup */ secp256k1_scratch_space_destroy(none, NULL); /* no-op */ secp256k1_context_destroy(none); } /***** HASH TESTS *****/ void run_sha256_tests(void) { static const char *inputs[8] = { "", "abc", "message digest", "secure hash algorithm", "SHA256 is considered to be safe", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", "For this sample, this 63-byte string will be used as input data", "This is exactly 64 bytes long, not counting the terminating byte" }; static const unsigned char outputs[8][32] = { {0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, {0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}, {0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50}, {0xf3, 0x0c, 0xeb, 0x2b, 0xb2, 0x82, 0x9e, 0x79, 0xe4, 0xca, 0x97, 0x53, 0xd3, 0x5a, 0x8e, 0xcc, 0x00, 0x26, 0x2d, 0x16, 0x4c, 0xc0, 0x77, 0x08, 0x02, 0x95, 0x38, 0x1c, 0xbd, 0x64, 0x3f, 0x0d}, {0x68, 0x19, 0xd9, 0x15, 0xc7, 0x3f, 0x4d, 0x1e, 0x77, 0xe4, 0xe1, 0xb5, 0x2d, 0x1f, 0xa0, 0xf9, 0xcf, 0x9b, 0xea, 0xea, 0xd3, 0x93, 0x9f, 0x15, 0x87, 0x4b, 0xd9, 0x88, 0xe2, 0xa2, 0x36, 0x30}, {0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1}, {0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42}, {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8} }; int i; for (i = 0; i < 8; i++) { unsigned char out[32]; secp256k1_sha256 hasher; secp256k1_sha256_initialize(&hasher); secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); secp256k1_sha256_finalize(&hasher, out); CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { int split = secp256k1_testrand_int(strlen(inputs[i])); secp256k1_sha256_initialize(&hasher); secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); secp256k1_sha256_finalize(&hasher, out); CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); } } } void run_hmac_sha256_tests(void) { static const char *keys[6] = { "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", "\x4a\x65\x66\x65", "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", 
"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" }; static const char *inputs[6] = { "\x48\x69\x20\x54\x68\x65\x72\x65", "\x77\x68\x61\x74\x20\x64\x6f\x20\x79\x61\x20\x77\x61\x6e\x74\x20\x66\x6f\x72\x20\x6e\x6f\x74\x68\x69\x6e\x67\x3f", "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd", "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd", "\x54\x65\x73\x74\x20\x55\x73\x69\x6e\x67\x20\x4c\x61\x72\x67\x65\x72\x20\x54\x68\x61\x6e\x20\x42\x6c\x6f\x63\x6b\x2d\x53\x69\x7a\x65\x20\x4b\x65\x79\x20\x2d\x20\x48\x61\x73\x68\x20\x4b\x65\x79\x20\x46\x69\x72\x73\x74", "\x54\x68\x69\x73\x20\x69\x73\x20\x61\x20\x74\x65\x73\x74\x20\x75\x73\x69\x6e\x67\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x6b\x65\x79\x20\x61\x6e\x64\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x64\x61\x74\x61\x2e\x20\x54\x68\x65\x20\x6b\x65\x79\x20\x6e\x65\x65\x64\x73\x20\x74\x6f\x20\x62\x65\x20\x68\x61\x73\x68\x65\x64\x20\x62\x65\x66\x6f\x72\x65\x20\x62\x65\x69\x6e\x67\x20\x75\x73\x65\x64\x20\x62\x79\x20\x74\x68\x65\x20\x48\x4d\x41\x43\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x2e" }; static const unsigned char outputs[6][32] = { {0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53, 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b, 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7, 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7}, {0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43}, {0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46, 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7, 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22, 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe}, {0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e, 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a, 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07, 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b}, {0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 
0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54}, {0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2} }; int i; for (i = 0; i < 6; i++) { secp256k1_hmac_sha256 hasher; unsigned char out[32]; secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); secp256k1_hmac_sha256_finalize(&hasher, out); CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); if (strlen(inputs[i]) > 0) { int split = secp256k1_testrand_int(strlen(inputs[i])); secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); secp256k1_hmac_sha256_finalize(&hasher, out); CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0); } } } void run_rfc6979_hmac_sha256_tests(void) { static const unsigned char key1[65] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x4b, 0xf5, 0x12, 0x2f, 0x34, 0x45, 0x54, 0xc5, 0x3b, 0xde, 0x2e, 0xbb, 0x8c, 0xd2, 0xb7, 0xe3, 0xd1, 0x60, 0x0a, 0xd6, 0x31, 0xc3, 0x85, 0xa5, 0xd7, 0xcc, 0xe2, 0x3c, 0x77, 0x85, 0x45, 0x9a, 0}; static const unsigned char out1[3][32] = { {0x4f, 0xe2, 0x95, 0x25, 0xb2, 0x08, 0x68, 0x09, 0x15, 0x9a, 0xcd, 0xf0, 0x50, 0x6e, 0xfb, 0x86, 0xb0, 0xec, 0x93, 0x2c, 0x7b, 0xa4, 0x42, 0x56, 0xab, 0x32, 0x1e, 0x42, 0x1e, 0x67, 0xe9, 0xfb}, {0x2b, 0xf0, 0xff, 0xf1, 0xd3, 0xc3, 0x78, 0xa2, 0x2d, 0xc5, 0xde, 0x1d, 0x85, 0x65, 0x22, 0x32, 0x5c, 0x65, 0xb5, 0x04, 0x49, 0x1a, 0x0c, 0xbd, 0x01, 0xcb, 0x8f, 0x3a, 0xa6, 0x7f, 0xfd, 0x4a}, {0xf5, 0x28, 0xb4, 0x10, 0xcb, 0x54, 0x1f, 0x77, 0x00, 0x0d, 0x7a, 0xfb, 0x6c, 0x5b, 0x53, 0xc5, 0xc4, 0x71, 0xea, 0xb4, 0x3e, 0x46, 0x6d, 0x9a, 0xc5, 0x19, 0x0c, 0x39, 0xc8, 0x2f, 0xd8, 0x2e} }; static const unsigned char key2[64] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}; static const unsigned char out2[3][32] = { {0x9c, 0x23, 0x6c, 0x16, 0x5b, 0x82, 0xae, 0x0c, 0xd5, 0x90, 0x65, 0x9e, 0x10, 0x0b, 0x6b, 0xab, 0x30, 0x36, 0xe7, 0xba, 0x8b, 0x06, 0x74, 0x9b, 0xaf, 0x69, 0x81, 0xe1, 0x6f, 0x1a, 0x2b, 0x95}, {0xdf, 0x47, 0x10, 0x61, 0x62, 0x5b, 0xc0, 0xea, 0x14, 0xb6, 0x82, 0xfe, 0xee, 0x2c, 0x9c, 0x02, 0xf2, 0x35, 0xda, 0x04, 0x20, 0x4c, 0x1d, 0x62, 0xa1, 0x53, 0x6c, 0x6e, 0x17, 0xae, 0xd7, 0xa9}, {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94} }; secp256k1_rfc6979_hmac_sha256 rng; unsigned char out[32]; int i; secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64); for (i = 0; i < 3; i++) { secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); CHECK(secp256k1_memcmp_var(out, out1[i], 32) == 0); } secp256k1_rfc6979_hmac_sha256_finalize(&rng); 
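/* Re-run with the same key1 buffer but its full 65-byte length (including
 * the extra trailing zero byte): the RFC6979 state must be seeded
 * differently, so none of the next three output blocks may match out1. */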
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65); for (i = 0; i < 3; i++) { secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); CHECK(secp256k1_memcmp_var(out, out1[i], 32) != 0); } secp256k1_rfc6979_hmac_sha256_finalize(&rng); secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64); for (i = 0; i < 3; i++) { secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); CHECK(secp256k1_memcmp_var(out, out2[i], 32) == 0); } secp256k1_rfc6979_hmac_sha256_finalize(&rng); } /***** RANDOM TESTS *****/ void test_rand_bits(int rand32, int bits) { /* (1-1/2^B)^rounds[B] < 1/10^9, so rounds is the number of iterations to * get a false negative chance below once in a billion */ static const unsigned int rounds[7] = {1, 30, 73, 156, 322, 653, 1316}; /* We try multiplying the results with various odd numbers, which shouldn't * influence the uniform distribution modulo a power of 2. */ static const uint32_t mults[6] = {1, 3, 21, 289, 0x9999, 0x80402011}; /* We only select up to 6 bits from the output to analyse */ unsigned int usebits = bits > 6 ? 6 : bits; unsigned int maxshift = bits - usebits; /* For each of the maxshift+1 usebits-bit sequences inside a bits-bit number, track all observed outcomes, one per bit in a uint64_t. */ uint64_t x[6][27] = {{0}}; unsigned int i, shift, m; /* Multiply the output of all rand calls with the odd number m, which should not change the uniformity of its distribution. */ for (i = 0; i < rounds[usebits]; i++) { uint32_t r = (rand32 ? secp256k1_testrand32() : secp256k1_testrand_bits(bits)); CHECK((((uint64_t)r) >> bits) == 0); for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { uint32_t rm = r * mults[m]; for (shift = 0; shift <= maxshift; shift++) { x[m][shift] |= (((uint64_t)1) << ((rm >> shift) & ((1 << usebits) - 1))); } } } for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { for (shift = 0; shift <= maxshift; shift++) { /* Test that the lower usebits bits of x[shift] are 1 */ CHECK(((~x[m][shift]) << (64 - (1 << usebits))) == 0); } } } /* Subrange must be a whole divisor of range, and at most 64 */ void test_rand_int(uint32_t range, uint32_t subrange) { /* (1-1/subrange)^rounds < 1/10^9 */ int rounds = (subrange * 2073) / 100; int i; uint64_t x = 0; CHECK((range % subrange) == 0); for (i = 0; i < rounds; i++) { uint32_t r = secp256k1_testrand_int(range); CHECK(r < range); r = r % subrange; x |= (((uint64_t)1) << r); } /* Test that the lower subrange bits of x are 1. 
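Each iteration sets the bit indexed by r % subrange in x, so after the computed number of rounds every residue class modulo subrange must have appeared at least once.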
*/ CHECK(((~x) << (64 - subrange)) == 0); } void run_rand_bits(void) { size_t b; test_rand_bits(1, 32); for (b = 1; b <= 32; b++) { test_rand_bits(0, b); } } void run_rand_int(void) { static const uint32_t ms[] = {1, 3, 17, 1000, 13771, 999999, 33554432}; static const uint32_t ss[] = {1, 3, 6, 9, 13, 31, 64}; unsigned int m, s; for (m = 0; m < sizeof(ms) / sizeof(ms[0]); m++) { for (s = 0; s < sizeof(ss) / sizeof(ss[0]); s++) { test_rand_int(ms[m] * ss[s], ss[s]); } } } /***** NUM TESTS *****/ #ifndef USE_NUM_NONE void random_num_negate(secp256k1_num *num) { if (secp256k1_testrand_bits(1)) { secp256k1_num_negate(num); } } void random_num_order_test(secp256k1_num *num) { secp256k1_scalar sc; random_scalar_order_test(&sc); secp256k1_scalar_get_num(num, &sc); } void random_num_order(secp256k1_num *num) { secp256k1_scalar sc; random_scalar_order(&sc); secp256k1_scalar_get_num(num, &sc); } void test_num_negate(void) { secp256k1_num n1; secp256k1_num n2; random_num_order_test(&n1); /* n1 = R */ random_num_negate(&n1); secp256k1_num_copy(&n2, &n1); /* n2 = R */ secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ CHECK(secp256k1_num_is_zero(&n1)); secp256k1_num_copy(&n1, &n2); /* n1 = R */ secp256k1_num_negate(&n1); /* n1 = -R */ CHECK(!secp256k1_num_is_zero(&n1)); secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ CHECK(secp256k1_num_is_zero(&n1)); secp256k1_num_copy(&n1, &n2); /* n1 = R */ secp256k1_num_negate(&n1); /* n1 = -R */ CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2)); secp256k1_num_negate(&n1); /* n1 = R */ CHECK(secp256k1_num_eq(&n1, &n2)); } void test_num_add_sub(void) { int i; secp256k1_scalar s; secp256k1_num n1; secp256k1_num n2; secp256k1_num n1p2, n2p1, n1m2, n2m1; random_num_order_test(&n1); /* n1 = R1 */ if (secp256k1_testrand_bits(1)) { random_num_negate(&n1); } random_num_order_test(&n2); /* n2 = R2 */ if (secp256k1_testrand_bits(1)) { random_num_negate(&n2); } secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */ CHECK(secp256k1_num_eq(&n1p2, &n2p1)); CHECK(!secp256k1_num_eq(&n1p2, &n1m2)); secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ CHECK(secp256k1_num_eq(&n2m1, &n1m2)); CHECK(!secp256k1_num_eq(&n2m1, &n1)); secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ CHECK(secp256k1_num_eq(&n2m1, &n1)); CHECK(!secp256k1_num_eq(&n2p1, &n1)); secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ CHECK(secp256k1_num_eq(&n2p1, &n1)); /* check is_one */ secp256k1_scalar_set_int(&s, 1); secp256k1_scalar_get_num(&n1, &s); CHECK(secp256k1_num_is_one(&n1)); /* check that 2^n + 1 is never 1 */ secp256k1_scalar_get_num(&n2, &s); for (i = 0; i < 250; ++i) { secp256k1_num_add(&n1, &n1, &n1); /* n1 *= 2 */ secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ CHECK(!secp256k1_num_is_one(&n1p2)); } } void test_num_mod(void) { int i; secp256k1_scalar s; secp256k1_num order, n; /* check that 0 mod anything is 0 */ random_scalar_order_test(&s); secp256k1_scalar_get_num(&order, &s); secp256k1_scalar_set_int(&s, 0); secp256k1_scalar_get_num(&n, &s); secp256k1_num_mod(&n, &order); CHECK(secp256k1_num_is_zero(&n)); /* check that anything mod 1 is 0 */ secp256k1_scalar_set_int(&s, 1); secp256k1_scalar_get_num(&order, &s); secp256k1_scalar_get_num(&n, &s); secp256k1_num_mod(&n, &order); CHECK(secp256k1_num_is_zero(&n)); /* check that increasing the number past 2^256 does 
not break this */ random_scalar_order_test(&s); secp256k1_scalar_get_num(&n, &s); /* multiply by 2^8, which'll test this case with high probability */ for (i = 0; i < 8; ++i) { secp256k1_num_add(&n, &n, &n); } secp256k1_num_mod(&n, &order); CHECK(secp256k1_num_is_zero(&n)); } void test_num_jacobi(void) { secp256k1_scalar sqr; secp256k1_scalar small; secp256k1_scalar five; /* five is not a quadratic residue */ secp256k1_num order, n; int i; /* squares mod 5 are 1, 4 */ const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; /* check some small values with 5 as the order */ secp256k1_scalar_set_int(&five, 5); secp256k1_scalar_get_num(&order, &five); for (i = 0; i < 10; ++i) { secp256k1_scalar_set_int(&small, i); secp256k1_scalar_get_num(&n, &small); CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]); } /** test large values with 5 as group order */ secp256k1_scalar_get_num(&order, &five); /* we first need a scalar which is not a multiple of 5 */ do { secp256k1_num fiven; random_scalar_order_test(&sqr); secp256k1_scalar_get_num(&fiven, &five); secp256k1_scalar_get_num(&n, &sqr); secp256k1_num_mod(&n, &fiven); } while (secp256k1_num_is_zero(&n)); /* next force it to be a residue. 2 is a nonresidue mod 5 so we can * just multiply by two, i.e. add the number to itself */ if (secp256k1_num_jacobi(&n, &order) == -1) { secp256k1_num_add(&n, &n, &n); } /* test residue */ CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* test nonresidue */ secp256k1_num_add(&n, &n, &n); CHECK(secp256k1_num_jacobi(&n, &order) == -1); /** test with secp group order as order */ secp256k1_scalar_order_get_num(&order); random_scalar_order_test(&sqr); secp256k1_scalar_sqr(&sqr, &sqr); /* test residue */ secp256k1_scalar_get_num(&n, &sqr); CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* test nonresidue */ secp256k1_scalar_mul(&sqr, &sqr, &five); secp256k1_scalar_get_num(&n, &sqr); CHECK(secp256k1_num_jacobi(&n, &order) == -1); /* test multiple of the order*/ CHECK(secp256k1_num_jacobi(&order, &order) == 0); /* check one less than the order */ secp256k1_scalar_set_int(&small, 1); secp256k1_scalar_get_num(&n, &small); secp256k1_num_sub(&n, &order, &n); CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ } void run_num_smalltests(void) { int i; for (i = 0; i < 100*count; i++) { test_num_negate(); test_num_add_sub(); test_num_mod(); test_num_jacobi(); } } #endif /***** SCALAR TESTS *****/ void scalar_test(void) { secp256k1_scalar s; secp256k1_scalar s1; secp256k1_scalar s2; #ifndef USE_NUM_NONE secp256k1_num snum, s1num, s2num; secp256k1_num order, half_order; #endif unsigned char c[32]; /* Set 's' to a random scalar, with value 'snum'. */ random_scalar_order_test(&s); /* Set 's1' to a random scalar, with value 's1num'. */ random_scalar_order_test(&s1); /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */ random_scalar_order_test(&s2); secp256k1_scalar_get_b32(c, &s2); #ifndef USE_NUM_NONE secp256k1_scalar_get_num(&snum, &s); secp256k1_scalar_get_num(&s1num, &s1); secp256k1_scalar_get_num(&s2num, &s2); secp256k1_scalar_order_get_num(&order); half_order = order; secp256k1_num_shift(&half_order, 1); #endif { int i; /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. 
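Each step multiplies the accumulator by 16 (four doublings) and adds the next 4-bit group, taken from the most significant nibble downwards, so after 64 steps n must equal s.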
*/ secp256k1_scalar n; secp256k1_scalar_set_int(&n, 0); for (i = 0; i < 256; i += 4) { secp256k1_scalar t; int j; secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4)); for (j = 0; j < 4; j++) { secp256k1_scalar_add(&n, &n, &n); } secp256k1_scalar_add(&n, &n, &t); } CHECK(secp256k1_scalar_eq(&n, &s)); } { /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */ secp256k1_scalar n; int i = 0; secp256k1_scalar_set_int(&n, 0); while (i < 256) { secp256k1_scalar t; int j; int now = secp256k1_testrand_int(15) + 1; if (now + i > 256) { now = 256 - i; } secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now)); for (j = 0; j < now; j++) { secp256k1_scalar_add(&n, &n, &n); } secp256k1_scalar_add(&n, &n, &t); i += now; } CHECK(secp256k1_scalar_eq(&n, &s)); } #ifndef USE_NUM_NONE { /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */ secp256k1_num rnum; secp256k1_num r2num; secp256k1_scalar r; secp256k1_num_add(&rnum, &snum, &s2num); secp256k1_num_mod(&rnum, &order); secp256k1_scalar_add(&r, &s, &s2); secp256k1_scalar_get_num(&r2num, &r); CHECK(secp256k1_num_eq(&rnum, &r2num)); } { /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */ secp256k1_scalar r; secp256k1_num r2num; secp256k1_num rnum; secp256k1_num_mul(&rnum, &snum, &s2num); secp256k1_num_mod(&rnum, &order); secp256k1_scalar_mul(&r, &s, &s2); secp256k1_scalar_get_num(&r2num, &r); CHECK(secp256k1_num_eq(&rnum, &r2num)); /* The result can only be zero if at least one of the factors was zero. */ CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2))); /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */ CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2))); CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s))); } { secp256k1_scalar neg; secp256k1_num negnum; secp256k1_num negnum2; /* Check that comparison with zero matches comparison with zero on the number. */ CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s)); /* Check that comparison with the half order is equal to testing for high scalar. */ CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0)); secp256k1_scalar_negate(&neg, &s); secp256k1_num_sub(&negnum, &order, &snum); secp256k1_num_mod(&negnum, &order); /* Check that comparison with the half order is equal to testing for high scalar after negation. */ CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0)); /* Negating should change the high property, unless the value was already zero. */ CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s)); secp256k1_scalar_get_num(&negnum2, &neg); /* Negating a scalar should be equal to (order - n) mod order on the number. */ CHECK(secp256k1_num_eq(&negnum, &negnum2)); secp256k1_scalar_add(&neg, &neg, &s); /* Adding a number to its negation should result in zero. */ CHECK(secp256k1_scalar_is_zero(&neg)); secp256k1_scalar_negate(&neg, &neg); /* Negating zero should still result in zero. */ CHECK(secp256k1_scalar_is_zero(&neg)); } { /* Test secp256k1_scalar_mul_shift_var. 
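It computes the 512-bit product s1*s2 shifted right by 'shift' bits (here 256 <= shift <= 512), rounded to nearest; the num-based reference below reproduces the rounding by shifting by shift - 1, adding one and halving.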
*/
        secp256k1_scalar r;
        secp256k1_num one;
        secp256k1_num rnum;
        secp256k1_num rnum2;
        unsigned char cone[1] = {0x01};
        unsigned int shift = 256 + secp256k1_testrand_int(257);
        secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
        secp256k1_num_mul(&rnum, &s1num, &s2num);
        secp256k1_num_shift(&rnum, shift - 1);
        secp256k1_num_set_bin(&one, cone, 1);
        secp256k1_num_add(&rnum, &rnum, &one);
        secp256k1_num_shift(&rnum, 1);
        secp256k1_scalar_get_num(&rnum2, &r);
        CHECK(secp256k1_num_eq(&rnum, &rnum2));
    }

    {
        /* test secp256k1_scalar_shr_int */
        secp256k1_scalar r;
        int i;
        random_scalar_order_test(&r);
        for (i = 0; i < 100; ++i) {
            int low;
            int shift = 1 + secp256k1_testrand_int(15);
            int expected = r.d[0] % (1 << shift);
            low = secp256k1_scalar_shr_int(&r, shift);
            CHECK(expected == low);
        }
    }
#endif

    {
        /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
        if (!secp256k1_scalar_is_zero(&s)) {
            secp256k1_scalar inv;
#ifndef USE_NUM_NONE
            secp256k1_num invnum;
            secp256k1_num invnum2;
#endif
            secp256k1_scalar_inverse(&inv, &s);
#ifndef USE_NUM_NONE
            secp256k1_num_mod_inverse(&invnum, &snum, &order);
            secp256k1_scalar_get_num(&invnum2, &inv);
            CHECK(secp256k1_num_eq(&invnum, &invnum2));
#endif
            secp256k1_scalar_mul(&inv, &inv, &s);
            /* Multiplying a scalar with its inverse must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
            secp256k1_scalar_inverse(&inv, &inv);
            /* Inverting one must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
#ifndef USE_NUM_NONE
            secp256k1_scalar_get_num(&invnum, &inv);
            CHECK(secp256k1_num_is_one(&invnum));
#endif
        }
    }

    {
        /* Test commutativity of add. */
        secp256k1_scalar r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        secp256k1_scalar r1, r2;
        secp256k1_scalar b;
        int i;
        /* Test add_bit. */
        int bit = secp256k1_testrand_bits(8);
        secp256k1_scalar_set_int(&b, 1);
        CHECK(secp256k1_scalar_is_one(&b));
        for (i = 0; i < bit; i++) {
            secp256k1_scalar_add(&b, &b, &b);
        }
        r1 = s1;
        r2 = s1;
        if (!secp256k1_scalar_add(&r1, &r1, &b)) {
            /* No overflow happened. */
            secp256k1_scalar_cadd_bit(&r2, bit, 1);
            CHECK(secp256k1_scalar_eq(&r1, &r2));
            /* cadd is a noop when flag is zero */
            secp256k1_scalar_cadd_bit(&r2, bit, 0);
            CHECK(secp256k1_scalar_eq(&r1, &r2));
        }
    }

    {
        /* Test commutativity of mul. */
        secp256k1_scalar r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of add. */
        secp256k1_scalar r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r1, &r1, &s);
        secp256k1_scalar_add(&r2, &s2, &s);
        secp256k1_scalar_add(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of mul. */
        secp256k1_scalar r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s2, &s);
        secp256k1_scalar_mul(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test distributivity of mul over add. */
        secp256k1_scalar r1, r2, t;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s1, &s);
        secp256k1_scalar_mul(&t, &s2, &s);
        secp256k1_scalar_add(&r2, &r2, &t);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test square. */
        secp256k1_scalar r1, r2;
        secp256k1_scalar_sqr(&r1, &s1);
        secp256k1_scalar_mul(&r2, &s1, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test multiplicative identity.
*/ secp256k1_scalar r1, v1; secp256k1_scalar_set_int(&v1,1); secp256k1_scalar_mul(&r1, &s1, &v1); CHECK(secp256k1_scalar_eq(&r1, &s1)); } { /* Test additive identity. */ secp256k1_scalar r1, v0; secp256k1_scalar_set_int(&v0,0); secp256k1_scalar_add(&r1, &s1, &v0); CHECK(secp256k1_scalar_eq(&r1, &s1)); } { /* Test zero product property. */ secp256k1_scalar r1, v0; secp256k1_scalar_set_int(&v0,0); secp256k1_scalar_mul(&r1, &s1, &v0); CHECK(secp256k1_scalar_eq(&r1, &v0)); } } void run_scalar_set_b32_seckey_tests(void) { unsigned char b32[32]; secp256k1_scalar s1; secp256k1_scalar s2; /* Usually set_b32 and set_b32_seckey give the same result */ random_scalar_order_b32(b32); secp256k1_scalar_set_b32(&s1, b32, NULL); CHECK(secp256k1_scalar_set_b32_seckey(&s2, b32) == 1); CHECK(secp256k1_scalar_eq(&s1, &s2) == 1); memset(b32, 0, sizeof(b32)); CHECK(secp256k1_scalar_set_b32_seckey(&s2, b32) == 0); memset(b32, 0xFF, sizeof(b32)); CHECK(secp256k1_scalar_set_b32_seckey(&s2, b32) == 0); } void run_scalar_tests(void) { int i; for (i = 0; i < 128 * count; i++) { scalar_test(); } for (i = 0; i < count; i++) { run_scalar_set_b32_seckey_tests(); } { /* (-1)+1 should be zero. */ secp256k1_scalar s, o; secp256k1_scalar_set_int(&s, 1); CHECK(secp256k1_scalar_is_one(&s)); secp256k1_scalar_negate(&o, &s); secp256k1_scalar_add(&o, &o, &s); CHECK(secp256k1_scalar_is_zero(&o)); secp256k1_scalar_negate(&o, &o); CHECK(secp256k1_scalar_is_zero(&o)); } #ifndef USE_NUM_NONE { /* Test secp256k1_scalar_set_b32 boundary conditions */ secp256k1_num order; secp256k1_scalar scalar; unsigned char bin[32]; unsigned char bin_tmp[32]; int overflow = 0; /* 2^256-1 - order */ static const secp256k1_scalar all_ones_minus_order = SECP256K1_SCALAR_CONST( 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000001UL, 0x45512319UL, 0x50B75FC4UL, 0x402DA173UL, 0x2FC9BEBEUL ); /* A scalar set to 0s should be 0. */ memset(bin, 0, 32); secp256k1_scalar_set_b32(&scalar, bin, &overflow); CHECK(overflow == 0); CHECK(secp256k1_scalar_is_zero(&scalar)); /* A scalar with value of the curve order should be 0. */ secp256k1_scalar_order_get_num(&order); secp256k1_num_get_bin(bin, 32, &order); secp256k1_scalar_set_b32(&scalar, bin, &overflow); CHECK(overflow == 1); CHECK(secp256k1_scalar_is_zero(&scalar)); /* A scalar with value of the curve order minus one should not overflow. */ bin[31] -= 1; secp256k1_scalar_set_b32(&scalar, bin, &overflow); CHECK(overflow == 0); secp256k1_scalar_get_b32(bin_tmp, &scalar); CHECK(secp256k1_memcmp_var(bin, bin_tmp, 32) == 0); /* A scalar set to all 1s should overflow. */ memset(bin, 0xFF, 32); secp256k1_scalar_set_b32(&scalar, bin, &overflow); CHECK(overflow == 1); CHECK(secp256k1_scalar_eq(&scalar, &all_ones_minus_order)); } #endif { /* Does check_overflow check catch all ones? */ static const secp256k1_scalar overflowed = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); CHECK(secp256k1_scalar_check_overflow(&overflowed)); } { /* Static test vectors. * These were reduced from ~10^12 random vectors based on comparison-decision * and edge-case coverage on 32-bit and 64-bit implementations. * The responses were generated with Sage 5.9. 
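Each chal[i] entry is a pair of raw 32-byte scalar encodings, and each res[i] the corresponding pair of expected 32-byte results for the checks further below.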
*/ secp256k1_scalar x; secp256k1_scalar y; secp256k1_scalar z; secp256k1_scalar zz; secp256k1_scalar one; secp256k1_scalar r1; secp256k1_scalar r2; #if defined(USE_SCALAR_INV_NUM) secp256k1_scalar zzv; #endif int overflow; unsigned char chal[33][2][32] = { {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0xc0, 0xff, 0xff, 0xff}, {0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff}}, {{0xef, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x80, 0xff}}, {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x00}, {0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff, 0xff}}, {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0xf8, 0xff, 0xff, 0xff, 0xfd, 0xff}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0xf8, 0xff, 0x03, 0x00, 0xe0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff, 0xf3, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}}, {{0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x80, 0xff, 0xff, 0x3f, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff}}, {{0xff, 0xff, 0xff, 0xff, 0x00, 0x0f, 0xfc, 0x9f, 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0x0f, 0xfc, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, {0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0xf8, 0xff, 0x0f, 0xc0, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x80, 0xff, 0xff, 0xff}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xf0}, {0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, {{0x00, 0xf8, 0xff, 0x03, 0xff, 0xff, 0xff, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0xc0, 0xff, 0x0f, 0xfc, 0xff}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x3f, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, {{0x8f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0x00}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, {{0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00, 0x00, 0x80, 0xff, 0x7f}, {0xff, 0xcf, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0x01, 0xfc, 0xff, 0x01, 0x00, 0xfe, 0xff}, {0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00}}, {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0x01, 0x00, 0xf0, 0xff, 0xff, 0xe0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0x00}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x3f, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0xf8, 0x07, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x7e, 0x00, 0x00}}, {{0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0xfe, 0x07, 0x00}, {0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60}}, {{0xff, 0x01, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x80, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0xff, 0xff, 0x1f, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00}}, {{0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xcf, 0xff, 0x1f, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00}, {0x80, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0x7f, 0x00, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x80, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f, 0xf8, 0xff, 0xff, 0x1f, 0x00, 0xfe}}, {{0xff, 0xff, 0xff, 0x3f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0x03, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0x01, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xc0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0xff, 0xff, 0xff}}, {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7e, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x07, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0xff, 0x01, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0x1f, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, {{0xff, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff}, {0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0xc0, 0xf1, 0x7f, 0x00}}, {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00}, {0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x80, 0x1f, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x01, 0xff, 0xff}}, {{0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0x03, 0xe0, 0x01, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xf0, 0x07, 0x00, 0x3c, 0x80, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0xe0, 0xff, 0x00, 0x00, 0x00}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80}, {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x7f, 0xfe, 0xff, 0x1f, 0x00, 0xfe, 0xff, 0x03, 0x00, 0x00, 0xfe, 0xff}}, {{0xff, 0xff, 0x81, 0xff, 0xff, 0xff, 0xff, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x83, 0xff, 0xff, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xf0}, {0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0xf8, 0x07, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff}}, {{0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb, 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}, {0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb, 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}} }; unsigned char res[33][2][32] = { {{0x0c, 0x3b, 0x0a, 0xca, 0x8d, 0x1a, 0x2f, 0xb9, 0x8a, 0x7b, 0x53, 0x5a, 0x1f, 0xc5, 0x22, 0xa1, 0x07, 0x2a, 0x48, 0xea, 0x02, 0xeb, 0xb3, 0xd6, 0x20, 0x1e, 0x86, 0xd0, 0x95, 0xf6, 0x92, 0x35}, {0xdc, 0x90, 0x7a, 0x07, 0x2e, 0x1e, 0x44, 0x6d, 0xf8, 0x15, 0x24, 0x5b, 0x5a, 0x96, 0x37, 0x9c, 0x37, 0x7b, 0x0d, 0xac, 0x1b, 0x65, 0x58, 0x49, 0x43, 0xb7, 0x31, 0xbb, 0xa7, 0xf4, 0x97, 0x15}}, {{0xf1, 0xf7, 0x3a, 0x50, 0xe6, 0x10, 0xba, 0x22, 0x43, 0x4d, 0x1f, 0x1f, 0x7c, 0x27, 0xca, 0x9c, 0xb8, 0xb6, 0xa0, 0xfc, 0xd8, 0xc0, 0x05, 0x2f, 0xf7, 0x08, 0xe1, 0x76, 0xdd, 0xd0, 0x80, 0xc8}, {0xe3, 0x80, 0x80, 0xb8, 0xdb, 0xe3, 0xa9, 0x77, 0x00, 0xb0, 0xf5, 0x2e, 0x27, 0xe2, 0x68, 0xc4, 0x88, 0xe8, 0x04, 0xc1, 0x12, 0xbf, 0x78, 0x59, 0xe6, 0xa9, 0x7c, 0xe1, 0x81, 0xdd, 0xb9, 0xd5}}, {{0x96, 0xe2, 0xee, 0x01, 0xa6, 0x80, 0x31, 0xef, 0x5c, 0xd0, 0x19, 0xb4, 0x7d, 0x5f, 0x79, 0xab, 0xa1, 0x97, 0xd3, 0x7e, 0x33, 0xbb, 0x86, 0x55, 0x60, 0x20, 0x10, 0x0d, 0x94, 0x2d, 0x11, 0x7c}, {0xcc, 0xab, 0xe0, 0xe8, 0x98, 0x65, 0x12, 0x96, 0x38, 0x5a, 0x1a, 0xf2, 
0x85, 0x23, 0x59, 0x5f, 0xf9, 0xf3, 0xc2, 0x81, 0x70, 0x92, 0x65, 0x12, 0x9c, 0x65, 0x1e, 0x96, 0x00, 0xef, 0xe7, 0x63}}, {{0xac, 0x1e, 0x62, 0xc2, 0x59, 0xfc, 0x4e, 0x5c, 0x83, 0xb0, 0xd0, 0x6f, 0xce, 0x19, 0xf6, 0xbf, 0xa4, 0xb0, 0xe0, 0x53, 0x66, 0x1f, 0xbf, 0xc9, 0x33, 0x47, 0x37, 0xa9, 0x3d, 0x5d, 0xb0, 0x48}, {0x86, 0xb9, 0x2a, 0x7f, 0x8e, 0xa8, 0x60, 0x42, 0x26, 0x6d, 0x6e, 0x1c, 0xa2, 0xec, 0xe0, 0xe5, 0x3e, 0x0a, 0x33, 0xbb, 0x61, 0x4c, 0x9f, 0x3c, 0xd1, 0xdf, 0x49, 0x33, 0xcd, 0x72, 0x78, 0x18}}, {{0xf7, 0xd3, 0xcd, 0x49, 0x5c, 0x13, 0x22, 0xfb, 0x2e, 0xb2, 0x2f, 0x27, 0xf5, 0x8a, 0x5d, 0x74, 0xc1, 0x58, 0xc5, 0xc2, 0x2d, 0x9f, 0x52, 0xc6, 0x63, 0x9f, 0xba, 0x05, 0x76, 0x45, 0x7a, 0x63}, {0x8a, 0xfa, 0x55, 0x4d, 0xdd, 0xa3, 0xb2, 0xc3, 0x44, 0xfd, 0xec, 0x72, 0xde, 0xef, 0xc0, 0x99, 0xf5, 0x9f, 0xe2, 0x52, 0xb4, 0x05, 0x32, 0x58, 0x57, 0xc1, 0x8f, 0xea, 0xc3, 0x24, 0x5b, 0x94}}, {{0x05, 0x83, 0xee, 0xdd, 0x64, 0xf0, 0x14, 0x3b, 0xa0, 0x14, 0x4a, 0x3a, 0x41, 0x82, 0x7c, 0xa7, 0x2c, 0xaa, 0xb1, 0x76, 0xbb, 0x59, 0x64, 0x5f, 0x52, 0xad, 0x25, 0x29, 0x9d, 0x8f, 0x0b, 0xb0}, {0x7e, 0xe3, 0x7c, 0xca, 0xcd, 0x4f, 0xb0, 0x6d, 0x7a, 0xb2, 0x3e, 0xa0, 0x08, 0xb9, 0xa8, 0x2d, 0xc2, 0xf4, 0x99, 0x66, 0xcc, 0xac, 0xd8, 0xb9, 0x72, 0x2a, 0x4a, 0x3e, 0x0f, 0x7b, 0xbf, 0xf4}}, {{0x8c, 0x9c, 0x78, 0x2b, 0x39, 0x61, 0x7e, 0xf7, 0x65, 0x37, 0x66, 0x09, 0x38, 0xb9, 0x6f, 0x70, 0x78, 0x87, 0xff, 0xcf, 0x93, 0xca, 0x85, 0x06, 0x44, 0x84, 0xa7, 0xfe, 0xd3, 0xa4, 0xe3, 0x7e}, {0xa2, 0x56, 0x49, 0x23, 0x54, 0xa5, 0x50, 0xe9, 0x5f, 0xf0, 0x4d, 0xe7, 0xdc, 0x38, 0x32, 0x79, 0x4f, 0x1c, 0xb7, 0xe4, 0xbb, 0xf8, 0xbb, 0x2e, 0x40, 0x41, 0x4b, 0xcc, 0xe3, 0x1e, 0x16, 0x36}}, {{0x0c, 0x1e, 0xd7, 0x09, 0x25, 0x40, 0x97, 0xcb, 0x5c, 0x46, 0xa8, 0xda, 0xef, 0x25, 0xd5, 0xe5, 0x92, 0x4d, 0xcf, 0xa3, 0xc4, 0x5d, 0x35, 0x4a, 0xe4, 0x61, 0x92, 0xf3, 0xbf, 0x0e, 0xcd, 0xbe}, {0xe4, 0xaf, 0x0a, 0xb3, 0x30, 0x8b, 0x9b, 0x48, 0x49, 0x43, 0xc7, 0x64, 0x60, 0x4a, 0x2b, 0x9e, 0x95, 0x5f, 0x56, 0xe8, 0x35, 0xdc, 0xeb, 0xdc, 0xc7, 0xc4, 0xfe, 0x30, 0x40, 0xc7, 0xbf, 0xa4}}, {{0xd4, 0xa0, 0xf5, 0x81, 0x49, 0x6b, 0xb6, 0x8b, 0x0a, 0x69, 0xf9, 0xfe, 0xa8, 0x32, 0xe5, 0xe0, 0xa5, 0xcd, 0x02, 0x53, 0xf9, 0x2c, 0xe3, 0x53, 0x83, 0x36, 0xc6, 0x02, 0xb5, 0xeb, 0x64, 0xb8}, {0x1d, 0x42, 0xb9, 0xf9, 0xe9, 0xe3, 0x93, 0x2c, 0x4c, 0xee, 0x6c, 0x5a, 0x47, 0x9e, 0x62, 0x01, 0x6b, 0x04, 0xfe, 0xa4, 0x30, 0x2b, 0x0d, 0x4f, 0x71, 0x10, 0xd3, 0x55, 0xca, 0xf3, 0x5e, 0x80}}, {{0x77, 0x05, 0xf6, 0x0c, 0x15, 0x9b, 0x45, 0xe7, 0xb9, 0x11, 0xb8, 0xf5, 0xd6, 0xda, 0x73, 0x0c, 0xda, 0x92, 0xea, 0xd0, 0x9d, 0xd0, 0x18, 0x92, 0xce, 0x9a, 0xaa, 0xee, 0x0f, 0xef, 0xde, 0x30}, {0xf1, 0xf1, 0xd6, 0x9b, 0x51, 0xd7, 0x77, 0x62, 0x52, 0x10, 0xb8, 0x7a, 0x84, 0x9d, 0x15, 0x4e, 0x07, 0xdc, 0x1e, 0x75, 0x0d, 0x0c, 0x3b, 0xdb, 0x74, 0x58, 0x62, 0x02, 0x90, 0x54, 0x8b, 0x43}}, {{0xa6, 0xfe, 0x0b, 0x87, 0x80, 0x43, 0x67, 0x25, 0x57, 0x5d, 0xec, 0x40, 0x50, 0x08, 0xd5, 0x5d, 0x43, 0xd7, 0xe0, 0xaa, 0xe0, 0x13, 0xb6, 0xb0, 0xc0, 0xd4, 0xe5, 0x0d, 0x45, 0x83, 0xd6, 0x13}, {0x40, 0x45, 0x0a, 0x92, 0x31, 0xea, 0x8c, 0x60, 0x8c, 0x1f, 0xd8, 0x76, 0x45, 0xb9, 0x29, 0x00, 0x26, 0x32, 0xd8, 0xa6, 0x96, 0x88, 0xe2, 0xc4, 0x8b, 0xdb, 0x7f, 0x17, 0x87, 0xcc, 0xc8, 0xf2}}, {{0xc2, 0x56, 0xe2, 0xb6, 0x1a, 0x81, 0xe7, 0x31, 0x63, 0x2e, 0xbb, 0x0d, 0x2f, 0x81, 0x67, 0xd4, 0x22, 0xe2, 0x38, 0x02, 0x25, 0x97, 0xc7, 0x88, 0x6e, 0xdf, 0xbe, 0x2a, 0xa5, 0x73, 0x63, 0xaa}, {0x50, 0x45, 0xe2, 0xc3, 0xbd, 0x89, 0xfc, 0x57, 0xbd, 0x3c, 0xa3, 0x98, 0x7e, 0x7f, 0x36, 0x38, 0x92, 0x39, 0x1f, 
0x0f, 0x81, 0x1a, 0x06, 0x51, 0x1f, 0x8d, 0x6a, 0xff, 0x47, 0x16, 0x06, 0x9c}}, {{0x33, 0x95, 0xa2, 0x6f, 0x27, 0x5f, 0x9c, 0x9c, 0x64, 0x45, 0xcb, 0xd1, 0x3c, 0xee, 0x5e, 0x5f, 0x48, 0xa6, 0xaf, 0xe3, 0x79, 0xcf, 0xb1, 0xe2, 0xbf, 0x55, 0x0e, 0xa2, 0x3b, 0x62, 0xf0, 0xe4}, {0x14, 0xe8, 0x06, 0xe3, 0xbe, 0x7e, 0x67, 0x01, 0xc5, 0x21, 0x67, 0xd8, 0x54, 0xb5, 0x7f, 0xa4, 0xf9, 0x75, 0x70, 0x1c, 0xfd, 0x79, 0xdb, 0x86, 0xad, 0x37, 0x85, 0x83, 0x56, 0x4e, 0xf0, 0xbf}}, {{0xbc, 0xa6, 0xe0, 0x56, 0x4e, 0xef, 0xfa, 0xf5, 0x1d, 0x5d, 0x3f, 0x2a, 0x5b, 0x19, 0xab, 0x51, 0xc5, 0x8b, 0xdd, 0x98, 0x28, 0x35, 0x2f, 0xc3, 0x81, 0x4f, 0x5c, 0xe5, 0x70, 0xb9, 0xeb, 0x62}, {0xc4, 0x6d, 0x26, 0xb0, 0x17, 0x6b, 0xfe, 0x6c, 0x12, 0xf8, 0xe7, 0xc1, 0xf5, 0x2f, 0xfa, 0x91, 0x13, 0x27, 0xbd, 0x73, 0xcc, 0x33, 0x31, 0x1c, 0x39, 0xe3, 0x27, 0x6a, 0x95, 0xcf, 0xc5, 0xfb}}, {{0x30, 0xb2, 0x99, 0x84, 0xf0, 0x18, 0x2a, 0x6e, 0x1e, 0x27, 0xed, 0xa2, 0x29, 0x99, 0x41, 0x56, 0xe8, 0xd4, 0x0d, 0xef, 0x99, 0x9c, 0xf3, 0x58, 0x29, 0x55, 0x1a, 0xc0, 0x68, 0xd6, 0x74, 0xa4}, {0x07, 0x9c, 0xe7, 0xec, 0xf5, 0x36, 0x73, 0x41, 0xa3, 0x1c, 0xe5, 0x93, 0x97, 0x6a, 0xfd, 0xf7, 0x53, 0x18, 0xab, 0xaf, 0xeb, 0x85, 0xbd, 0x92, 0x90, 0xab, 0x3c, 0xbf, 0x30, 0x82, 0xad, 0xf6}}, {{0xc6, 0x87, 0x8a, 0x2a, 0xea, 0xc0, 0xa9, 0xec, 0x6d, 0xd3, 0xdc, 0x32, 0x23, 0xce, 0x62, 0x19, 0xa4, 0x7e, 0xa8, 0xdd, 0x1c, 0x33, 0xae, 0xd3, 0x4f, 0x62, 0x9f, 0x52, 0xe7, 0x65, 0x46, 0xf4}, {0x97, 0x51, 0x27, 0x67, 0x2d, 0xa2, 0x82, 0x87, 0x98, 0xd3, 0xb6, 0x14, 0x7f, 0x51, 0xd3, 0x9a, 0x0b, 0xd0, 0x76, 0x81, 0xb2, 0x4f, 0x58, 0x92, 0xa4, 0x86, 0xa1, 0xa7, 0x09, 0x1d, 0xef, 0x9b}}, {{0xb3, 0x0f, 0x2b, 0x69, 0x0d, 0x06, 0x90, 0x64, 0xbd, 0x43, 0x4c, 0x10, 0xe8, 0x98, 0x1c, 0xa3, 0xe1, 0x68, 0xe9, 0x79, 0x6c, 0x29, 0x51, 0x3f, 0x41, 0xdc, 0xdf, 0x1f, 0xf3, 0x60, 0xbe, 0x33}, {0xa1, 0x5f, 0xf7, 0x1d, 0xb4, 0x3e, 0x9b, 0x3c, 0xe7, 0xbd, 0xb6, 0x06, 0xd5, 0x60, 0x06, 0x6d, 0x50, 0xd2, 0xf4, 0x1a, 0x31, 0x08, 0xf2, 0xea, 0x8e, 0xef, 0x5f, 0x7d, 0xb6, 0xd0, 0xc0, 0x27}}, {{0x62, 0x9a, 0xd9, 0xbb, 0x38, 0x36, 0xce, 0xf7, 0x5d, 0x2f, 0x13, 0xec, 0xc8, 0x2d, 0x02, 0x8a, 0x2e, 0x72, 0xf0, 0xe5, 0x15, 0x9d, 0x72, 0xae, 0xfc, 0xb3, 0x4f, 0x02, 0xea, 0xe1, 0x09, 0xfe}, {0x00, 0x00, 0x00, 0x00, 0xfa, 0x0a, 0x3d, 0xbc, 0xad, 0x16, 0x0c, 0xb6, 0xe7, 0x7c, 0x8b, 0x39, 0x9a, 0x43, 0xbb, 0xe3, 0xc2, 0x55, 0x15, 0x14, 0x75, 0xac, 0x90, 0x9b, 0x7f, 0x9a, 0x92, 0x00}}, {{0x8b, 0xac, 0x70, 0x86, 0x29, 0x8f, 0x00, 0x23, 0x7b, 0x45, 0x30, 0xaa, 0xb8, 0x4c, 0xc7, 0x8d, 0x4e, 0x47, 0x85, 0xc6, 0x19, 0xe3, 0x96, 0xc2, 0x9a, 0xa0, 0x12, 0xed, 0x6f, 0xd7, 0x76, 0x16}, {0x45, 0xaf, 0x7e, 0x33, 0xc7, 0x7f, 0x10, 0x6c, 0x7c, 0x9f, 0x29, 0xc1, 0xa8, 0x7e, 0x15, 0x84, 0xe7, 0x7d, 0xc0, 0x6d, 0xab, 0x71, 0x5d, 0xd0, 0x6b, 0x9f, 0x97, 0xab, 0xcb, 0x51, 0x0c, 0x9f}}, {{0x9e, 0xc3, 0x92, 0xb4, 0x04, 0x9f, 0xc8, 0xbb, 0xdd, 0x9e, 0xc6, 0x05, 0xfd, 0x65, 0xec, 0x94, 0x7f, 0x2c, 0x16, 0xc4, 0x40, 0xac, 0x63, 0x7b, 0x7d, 0xb8, 0x0c, 0xe4, 0x5b, 0xe3, 0xa7, 0x0e}, {0x43, 0xf4, 0x44, 0xe8, 0xcc, 0xc8, 0xd4, 0x54, 0x33, 0x37, 0x50, 0xf2, 0x87, 0x42, 0x2e, 0x00, 0x49, 0x60, 0x62, 0x02, 0xfd, 0x1a, 0x7c, 0xdb, 0x29, 0x6c, 0x6d, 0x54, 0x53, 0x08, 0xd1, 0xc8}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, {{0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1, 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0, 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59, 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}, {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1, 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0, 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59, 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}}, {{0x28, 0x56, 0xac, 0x0e, 0x4f, 0x98, 0x09, 0xf0, 0x49, 0xfa, 0x7f, 0x84, 0xac, 0x7e, 0x50, 0x5b, 0x17, 0x43, 0x14, 0x89, 0x9c, 0x53, 0xa8, 0x94, 0x30, 0xf2, 0x11, 0x4d, 0x92, 0x14, 0x27, 0xe8}, {0x39, 0x7a, 0x84, 0x56, 0x79, 0x9d, 0xec, 0x26, 0x2c, 0x53, 0xc1, 0x94, 0xc9, 0x8d, 0x9e, 0x9d, 0x32, 0x1f, 0xdd, 0x84, 0x04, 0xe8, 0xe2, 0x0a, 0x6b, 0xbe, 0xbb, 0x42, 0x40, 0x67, 0x30, 0x6c}}, {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x73, 0x2f, 0xc9, 0xbe, 0xbd}, {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1, 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0, 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59, 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}}, {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, {{0x1c, 0xc4, 0xf7, 0xda, 0x0f, 0x65, 0xca, 0x39, 0x70, 0x52, 0x92, 0x8e, 0xc3, 0xc8, 0x15, 0xea, 0x7f, 0x10, 0x9e, 0x77, 0x4b, 0x6e, 0x2d, 0xdf, 0xe8, 0x30, 0x9d, 0xda, 0xe8, 0x9a, 0x65, 0xae}, {0x02, 0xb0, 0x16, 0xb1, 0x1d, 0xc8, 0x57, 0x7b, 0xa2, 0x3a, 0xa2, 0xa3, 0x38, 0x5c, 0x8f, 0xeb, 0x66, 0x37, 0x91, 0xa8, 0x5f, 0xef, 0x04, 0xf6, 0x59, 0x75, 0xe1, 0xee, 0x92, 0xf6, 0x0e, 0x30}}, {{0x8d, 0x76, 0x14, 0xa4, 0x14, 0x06, 0x9f, 0x9a, 0xdf, 0x4a, 0x85, 0xa7, 0x6b, 0xbf, 0x29, 0x6f, 0xbc, 0x34, 0x87, 0x5d, 0xeb, 0xbb, 0x2e, 0xa9, 0xc9, 0x1f, 0x58, 0xd6, 0x9a, 0x82, 0xa0, 0x56}, {0xd4, 0xb9, 0xdb, 0x88, 0x1d, 0x04, 0xe9, 0x93, 0x8d, 0x3f, 0x20, 0xd5, 0x86, 0xa8, 0x83, 0x07, 0xdb, 0x09, 0xd8, 0x22, 0x1f, 0x7f, 0xf1, 0x71, 0xc8, 0xe7, 0x5d, 0x47, 0xaf, 0x8b, 0x72, 0xe9}}, {{0x83, 0xb9, 0x39, 0xb2, 0xa4, 0xdf, 0x46, 0x87, 0xc2, 0xb8, 0xf1, 0xe6, 0x4c, 0xd1, 0xe2, 0xa9, 0xe4, 0x70, 0x30, 0x34, 0xbc, 0x52, 0x7c, 0x55, 0xa6, 0xec, 0x80, 0xa4, 0xe5, 0xd2, 0xdc, 0x73}, {0x08, 0xf1, 0x03, 0xcf, 0x16, 0x73, 0xe8, 0x7d, 0xb6, 0x7e, 0x9b, 0xc0, 0xb4, 0xc2, 0xa5, 0x86, 0x02, 0x77, 0xd5, 0x27, 0x86, 0xa5, 0x15, 0xfb, 0xae, 0x9b, 0x8c, 0xa9, 0xf9, 0xf8, 0xa8, 0x4a}}, {{0x8b, 0x00, 0x49, 0xdb, 0xfa, 0xf0, 0x1b, 0xa2, 0xed, 0x8a, 0x9a, 0x7a, 0x36, 0x78, 0x4a, 0xc7, 0xf7, 0xad, 0x39, 0xd0, 0x6c, 0x65, 0x7a, 0x41, 0xce, 0xd6, 0xd6, 0x4c, 0x20, 0x21, 0x6b, 0xc7}, {0xc6, 0xca, 0x78, 0x1d, 0x32, 0x6c, 0x6c, 0x06, 0x91, 0xf2, 0x1a, 0xe8, 0x43, 0x16, 0xea, 0x04, 0x3c, 0x1f, 0x07, 0x85, 0xf7, 0x09, 0x22, 0x08, 0xba, 0x13, 0xfd, 0x78, 0x1e, 0x3f, 0x6f, 0x62}}, 
{{0x25, 0x9b, 0x7c, 0xb0, 0xac, 0x72, 0x6f, 0xb2, 0xe3, 0x53, 0x84, 0x7a, 0x1a, 0x9a, 0x98, 0x9b, 0x44, 0xd3, 0x59, 0xd0, 0x8e, 0x57, 0x41, 0x40, 0x78, 0xa7, 0x30, 0x2f, 0x4c, 0x9c, 0xb9, 0x68}, {0xb7, 0x75, 0x03, 0x63, 0x61, 0xc2, 0x48, 0x6e, 0x12, 0x3d, 0xbf, 0x4b, 0x27, 0xdf, 0xb1, 0x7a, 0xff, 0x4e, 0x31, 0x07, 0x83, 0xf4, 0x62, 0x5b, 0x19, 0xa5, 0xac, 0xa0, 0x32, 0x58, 0x0d, 0xa7}}, {{0x43, 0x4f, 0x10, 0xa4, 0xca, 0xdb, 0x38, 0x67, 0xfa, 0xae, 0x96, 0xb5, 0x6d, 0x97, 0xff, 0x1f, 0xb6, 0x83, 0x43, 0xd3, 0xa0, 0x2d, 0x70, 0x7a, 0x64, 0x05, 0x4c, 0xa7, 0xc1, 0xa5, 0x21, 0x51}, {0xe4, 0xf1, 0x23, 0x84, 0xe1, 0xb5, 0x9d, 0xf2, 0xb8, 0x73, 0x8b, 0x45, 0x2b, 0x35, 0x46, 0x38, 0x10, 0x2b, 0x50, 0xf8, 0x8b, 0x35, 0xcd, 0x34, 0xc8, 0x0e, 0xf6, 0xdb, 0x09, 0x35, 0xf0, 0xda}}, {{0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34, 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13, 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}, {0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34, 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13, 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} }; secp256k1_scalar_set_int(&one, 1); for (i = 0; i < 33; i++) { secp256k1_scalar_set_b32(&x, chal[i][0], &overflow); CHECK(!overflow); secp256k1_scalar_set_b32(&y, chal[i][1], &overflow); CHECK(!overflow); secp256k1_scalar_set_b32(&r1, res[i][0], &overflow); CHECK(!overflow); secp256k1_scalar_set_b32(&r2, res[i][1], &overflow); CHECK(!overflow); secp256k1_scalar_mul(&z, &x, &y); CHECK(!secp256k1_scalar_check_overflow(&z)); CHECK(secp256k1_scalar_eq(&r1, &z)); if (!secp256k1_scalar_is_zero(&y)) { secp256k1_scalar_inverse(&zz, &y); CHECK(!secp256k1_scalar_check_overflow(&zz)); #if defined(USE_SCALAR_INV_NUM) secp256k1_scalar_inverse_var(&zzv, &y); CHECK(secp256k1_scalar_eq(&zzv, &zz)); #endif secp256k1_scalar_mul(&z, &z, &zz); CHECK(!secp256k1_scalar_check_overflow(&z)); CHECK(secp256k1_scalar_eq(&x, &z)); secp256k1_scalar_mul(&zz, &zz, &y); CHECK(!secp256k1_scalar_check_overflow(&zz)); CHECK(secp256k1_scalar_eq(&one, &zz)); } secp256k1_scalar_mul(&z, &x, &x); CHECK(!secp256k1_scalar_check_overflow(&z)); secp256k1_scalar_sqr(&zz, &x); CHECK(!secp256k1_scalar_check_overflow(&zz)); CHECK(secp256k1_scalar_eq(&zz, &z)); CHECK(secp256k1_scalar_eq(&r2, &zz)); } } } /***** FIELD TESTS *****/ void random_fe(secp256k1_fe *x) { unsigned char bin[32]; do { secp256k1_testrand256(bin); if (secp256k1_fe_set_b32(x, bin)) { return; } } while(1); } void random_fe_test(secp256k1_fe *x) { unsigned char bin[32]; do { secp256k1_testrand256_test(bin); if (secp256k1_fe_set_b32(x, bin)) { return; } } while(1); } void random_fe_non_zero(secp256k1_fe *nz) { int tries = 10; while (--tries >= 0) { random_fe(nz); secp256k1_fe_normalize(nz); if (!secp256k1_fe_is_zero(nz)) { break; } } /* Infinitesimal probability of spurious failure here */ CHECK(tries >= 0); } void random_fe_non_square(secp256k1_fe *ns) { secp256k1_fe r; random_fe_non_zero(ns); if (secp256k1_fe_sqrt(&r, ns)) { secp256k1_fe_negate(ns, ns, 1); } } int check_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) { secp256k1_fe an = *a; secp256k1_fe bn = *b; secp256k1_fe_normalize_weak(&an); secp256k1_fe_normalize_var(&bn); return secp256k1_fe_equal_var(&an, &bn); } int check_fe_inverse(const secp256k1_fe *a, const secp256k1_fe *ai) { secp256k1_fe x; secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); secp256k1_fe_mul(&x, a, ai); return check_fe_equal(&x, &one); } void run_field_convert(void) { 
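    /* Round-trip one fixed byte pattern through all three field-element
     * encodings used by the library: the 32-byte big-endian form (b32), the
     * working representation (secp256k1_fe), and the storage representation
     * (secp256k1_fe_storage). Every conversion below must reproduce the same
     * value. */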
static const unsigned char b32[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40 }; static const secp256k1_fe_storage fes = SECP256K1_FE_STORAGE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); static const secp256k1_fe fe = SECP256K1_FE_CONST( 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL ); secp256k1_fe fe2; unsigned char b322[32]; secp256k1_fe_storage fes2; /* Check conversions to fe. */ CHECK(secp256k1_fe_set_b32(&fe2, b32)); CHECK(secp256k1_fe_equal_var(&fe, &fe2)); secp256k1_fe_from_storage(&fe2, &fes); CHECK(secp256k1_fe_equal_var(&fe, &fe2)); /* Check conversion from fe. */ secp256k1_fe_get_b32(b322, &fe); CHECK(secp256k1_memcmp_var(b322, b32, 32) == 0); secp256k1_fe_to_storage(&fes2, &fe); CHECK(secp256k1_memcmp_var(&fes2, &fes, sizeof(fes)) == 0); } int fe_secp256k1_memcmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { secp256k1_fe t = *b; #ifdef VERIFY t.magnitude = a->magnitude; t.normalized = a->normalized; #endif return secp256k1_memcmp_var(a, &t, sizeof(secp256k1_fe)); } void run_field_misc(void) { secp256k1_fe x; secp256k1_fe y; secp256k1_fe z; secp256k1_fe q; secp256k1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); int i, j; for (i = 0; i < 5*count; i++) { secp256k1_fe_storage xs, ys, zs; random_fe(&x); random_fe_non_zero(&y); /* Test the fe equality and comparison operations. */ CHECK(secp256k1_fe_cmp_var(&x, &x) == 0); CHECK(secp256k1_fe_equal_var(&x, &x)); z = x; secp256k1_fe_add(&z,&y); /* Test fe conditional move; z is not normalized here. */ q = x; secp256k1_fe_cmov(&x, &z, 0); #ifdef VERIFY CHECK(x.normalized && x.magnitude == 1); #endif secp256k1_fe_cmov(&x, &x, 1); CHECK(fe_secp256k1_memcmp_var(&x, &z) != 0); CHECK(fe_secp256k1_memcmp_var(&x, &q) == 0); secp256k1_fe_cmov(&q, &z, 1); #ifdef VERIFY CHECK(!q.normalized && q.magnitude == z.magnitude); #endif CHECK(fe_secp256k1_memcmp_var(&q, &z) == 0); secp256k1_fe_normalize_var(&x); secp256k1_fe_normalize_var(&z); CHECK(!secp256k1_fe_equal_var(&x, &z)); secp256k1_fe_normalize_var(&q); secp256k1_fe_cmov(&q, &z, (i&1)); #ifdef VERIFY CHECK(q.normalized && q.magnitude == 1); #endif for (j = 0; j < 6; j++) { secp256k1_fe_negate(&z, &z, j+1); secp256k1_fe_normalize_var(&q); secp256k1_fe_cmov(&q, &z, (j&1)); #ifdef VERIFY CHECK((q.normalized != (j&1)) && q.magnitude == ((j&1) ? z.magnitude : 1)); #endif } secp256k1_fe_normalize_var(&z); /* Test storage conversion and conditional moves. */ secp256k1_fe_to_storage(&xs, &x); secp256k1_fe_to_storage(&ys, &y); secp256k1_fe_to_storage(&zs, &z); secp256k1_fe_storage_cmov(&zs, &xs, 0); secp256k1_fe_storage_cmov(&zs, &zs, 1); CHECK(secp256k1_memcmp_var(&xs, &zs, sizeof(xs)) != 0); secp256k1_fe_storage_cmov(&ys, &xs, 1); CHECK(secp256k1_memcmp_var(&xs, &ys, sizeof(xs)) == 0); secp256k1_fe_from_storage(&x, &xs); secp256k1_fe_from_storage(&y, &ys); secp256k1_fe_from_storage(&z, &zs); /* Test that mul_int, mul, and add agree. 
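     * At this point y == x (the storage cmov checks above ended with ys == xs),
     * so small multiples of x can be computed three ways below: by repeated
     * secp256k1_fe_add, by secp256k1_fe_mul_int, and by a full
     * secp256k1_fe_mul with the constant 5.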
*/ secp256k1_fe_add(&y, &x); secp256k1_fe_add(&y, &x); z = x; secp256k1_fe_mul_int(&z, 3); CHECK(check_fe_equal(&y, &z)); secp256k1_fe_add(&y, &x); secp256k1_fe_add(&z, &x); CHECK(check_fe_equal(&z, &y)); z = x; secp256k1_fe_mul_int(&z, 5); secp256k1_fe_mul(&q, &x, &fe5); CHECK(check_fe_equal(&z, &q)); secp256k1_fe_negate(&x, &x, 1); secp256k1_fe_add(&z, &x); secp256k1_fe_add(&q, &x); CHECK(check_fe_equal(&y, &z)); CHECK(check_fe_equal(&q, &y)); } } void run_field_inv(void) { secp256k1_fe x, xi, xii; int i; for (i = 0; i < 10*count; i++) { random_fe_non_zero(&x); secp256k1_fe_inv(&xi, &x); CHECK(check_fe_inverse(&x, &xi)); secp256k1_fe_inv(&xii, &xi); CHECK(check_fe_equal(&x, &xii)); } } void run_field_inv_var(void) { secp256k1_fe x, xi, xii; int i; for (i = 0; i < 10*count; i++) { random_fe_non_zero(&x); secp256k1_fe_inv_var(&xi, &x); CHECK(check_fe_inverse(&x, &xi)); secp256k1_fe_inv_var(&xii, &xi); CHECK(check_fe_equal(&x, &xii)); } } void run_field_inv_all_var(void) { secp256k1_fe x[16], xi[16], xii[16]; int i; /* Check it's safe to call for 0 elements */ secp256k1_fe_inv_all_var(xi, x, 0); for (i = 0; i < count; i++) { size_t j; size_t len = secp256k1_testrand_int(15) + 1; for (j = 0; j < len; j++) { random_fe_non_zero(&x[j]); } secp256k1_fe_inv_all_var(xi, x, len); for (j = 0; j < len; j++) { CHECK(check_fe_inverse(&x[j], &xi[j])); } secp256k1_fe_inv_all_var(xii, xi, len); for (j = 0; j < len; j++) { CHECK(check_fe_equal(&x[j], &xii[j])); } } } void run_sqr(void) { secp256k1_fe x, s; { int i; secp256k1_fe_set_int(&x, 1); secp256k1_fe_negate(&x, &x, 1); for (i = 1; i <= 512; ++i) { secp256k1_fe_mul_int(&x, 2); secp256k1_fe_normalize(&x); secp256k1_fe_sqr(&s, &x); } } } void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) { secp256k1_fe r1, r2; int v = secp256k1_fe_sqrt(&r1, a); CHECK((v == 0) == (k == NULL)); if (k != NULL) { /* Check that the returned root is +/- the given known answer */ secp256k1_fe_negate(&r2, &r1, 1); secp256k1_fe_add(&r1, k); secp256k1_fe_add(&r2, k); secp256k1_fe_normalize(&r1); secp256k1_fe_normalize(&r2); CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2)); } } void run_sqrt(void) { secp256k1_fe ns, x, s, t; int i; /* Check sqrt(0) is 0 */ secp256k1_fe_set_int(&x, 0); secp256k1_fe_sqr(&s, &x); test_sqrt(&s, &x); /* Check sqrt of small squares (and their negatives) */ for (i = 1; i <= 100; i++) { secp256k1_fe_set_int(&x, i); secp256k1_fe_sqr(&s, &x); test_sqrt(&s, &x); secp256k1_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); } /* Consistency checks for large random values */ for (i = 0; i < 10; i++) { int j; random_fe_non_square(&ns); for (j = 0; j < count; j++) { random_fe(&x); secp256k1_fe_sqr(&s, &x); test_sqrt(&s, &x); secp256k1_fe_negate(&t, &s, 1); test_sqrt(&t, NULL); secp256k1_fe_mul(&t, &s, &ns); test_sqrt(&t, NULL); } } } /***** GROUP TESTS *****/ void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); } /* This compares jacobian points including their Z, not just their geometric meaning. 
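 * Since affine coordinates are recovered as x = X/Z^2, y = Y/Z^3, the
 * Jacobian triples (X, Y, Z) and (s^2*X, s^3*Y, s*Z) describe the same
 * affine point, yet compare as different here because Z is included.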
*/ int gej_xyz_equals_gej(const secp256k1_gej *a, const secp256k1_gej *b) { secp256k1_gej a2; secp256k1_gej b2; int ret = 1; ret &= a->infinity == b->infinity; if (ret && !a->infinity) { a2 = *a; b2 = *b; secp256k1_fe_normalize(&a2.x); secp256k1_fe_normalize(&a2.y); secp256k1_fe_normalize(&a2.z); secp256k1_fe_normalize(&b2.x); secp256k1_fe_normalize(&b2.y); secp256k1_fe_normalize(&b2.z); ret &= secp256k1_fe_cmp_var(&a2.x, &b2.x) == 0; ret &= secp256k1_fe_cmp_var(&a2.y, &b2.y) == 0; ret &= secp256k1_fe_cmp_var(&a2.z, &b2.z) == 0; } return ret; } void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { secp256k1_fe z2s; secp256k1_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ secp256k1_fe_sqr(&z2s, &b->z); secp256k1_fe_mul(&u1, &a->x, &z2s); u2 = b->x; secp256k1_fe_normalize_weak(&u2); secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); s2 = b->y; secp256k1_fe_normalize_weak(&s2); CHECK(secp256k1_fe_equal_var(&u1, &u2)); CHECK(secp256k1_fe_equal_var(&s1, &s2)); } void test_ge(void) { int i, i1; -#ifdef USE_ENDOMORPHISM int runs = 6; -#else - int runs = 4; -#endif - /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4). - * The second in each pair of identical points uses a random Z coordinate in the Jacobian form. - * All magnitudes are randomized. - * All 17*17 combinations of points are added to each other, using all applicable methods. - * - * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well. + /* 25 points are used: + * - infinity + * - for each of four random points p1 p2 p3 p4, we add the point, its + * negation, and then those two again but with randomized Z coordinate. + * - The same is then done for lambda*p1 and lambda^2*p1. */ secp256k1_ge *ge = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * (1 + 4 * runs)); secp256k1_gej *gej = (secp256k1_gej *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_gej) * (1 + 4 * runs)); secp256k1_fe *zinv = (secp256k1_fe *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs)); secp256k1_fe zf; secp256k1_fe zfi2, zfi3; secp256k1_gej_set_infinity(&gej[0]); secp256k1_ge_clear(&ge[0]); secp256k1_ge_set_gej_var(&ge[0], &gej[0]); for (i = 0; i < runs; i++) { int j; secp256k1_ge g; random_group_element_test(&g); -#ifdef USE_ENDOMORPHISM if (i >= runs - 2) { secp256k1_ge_mul_lambda(&g, &ge[1]); } if (i >= runs - 1) { secp256k1_ge_mul_lambda(&g, &g); } -#endif ge[1 + 4 * i] = g; ge[2 + 4 * i] = g; secp256k1_ge_neg(&ge[3 + 4 * i], &g); secp256k1_ge_neg(&ge[4 + 4 * i], &g); secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]); secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]); for (j = 0; j < 4; j++) { random_field_element_magnitude(&ge[1 + j + 4 * i].x); random_field_element_magnitude(&ge[1 + j + 4 * i].y); random_field_element_magnitude(&gej[1 + j + 4 * i].x); random_field_element_magnitude(&gej[1 + j + 4 * i].y); random_field_element_magnitude(&gej[1 + j + 4 * i].z); } } /* Compute z inverses. */ { secp256k1_fe *zs = checked_malloc(&ctx->error_callback, sizeof(secp256k1_fe) * (1 + 4 * runs)); for (i = 0; i < 4 * runs + 1; i++) { if (i == 0) { /* The point at infinity does not have a meaningful z inverse. Any should do. 
*/ do { random_field_element_test(&zs[i]); } while(secp256k1_fe_is_zero(&zs[i])); } else { zs[i] = gej[i].z; } } secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1); free(zs); } /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */ do { random_field_element_test(&zf); } while(secp256k1_fe_is_zero(&zf)); random_field_element_magnitude(&zf); secp256k1_fe_inv_var(&zfi3, &zf); secp256k1_fe_sqr(&zfi2, &zfi3); secp256k1_fe_mul(&zfi3, &zfi3, &zfi2); for (i1 = 0; i1 < 1 + 4 * runs; i1++) { int i2; for (i2 = 0; i2 < 1 + 4 * runs; i2++) { /* Compute reference result using gej + gej (var). */ secp256k1_gej refj, resj; secp256k1_ge ref; secp256k1_fe zr; secp256k1_gej_add_var(&refj, &gej[i1], &gej[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr); /* Check Z ratio. */ if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&refj)) { secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z); CHECK(secp256k1_fe_equal_var(&zrz, &refj.z)); } secp256k1_ge_set_gej_var(&ref, &refj); /* Test gej + ge with Z ratio result (var). */ secp256k1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr); ge_equals_gej(&ref, &resj); if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&resj)) { secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z); CHECK(secp256k1_fe_equal_var(&zrz, &resj.z)); } /* Test gej + ge (var, with additional Z factor). */ { secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); random_field_element_magnitude(&ge2_zfi.x); random_field_element_magnitude(&ge2_zfi.y); secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); ge_equals_gej(&ref, &resj); } /* Test gej + ge (const). */ if (i2 != 0) { /* secp256k1_gej_add_ge does not support its second argument being infinity. */ secp256k1_gej_add_ge(&resj, &gej[i1], &ge[i2]); ge_equals_gej(&ref, &resj); } /* Test doubling (var). */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) { secp256k1_fe zr2; /* Normal doubling with Z ratio result. */ secp256k1_gej_double_var(&resj, &gej[i1], &zr2); ge_equals_gej(&ref, &resj); /* Check Z ratio. */ secp256k1_fe_mul(&zr2, &zr2, &gej[i1].z); CHECK(secp256k1_fe_equal_var(&zr2, &resj.z)); /* Normal doubling. */ secp256k1_gej_double_var(&resj, &gej[i2], NULL); ge_equals_gej(&ref, &resj); /* Constant-time doubling. */ secp256k1_gej_double(&resj, &gej[i2]); ge_equals_gej(&ref, &resj); } /* Test adding opposites. */ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) { CHECK(secp256k1_ge_is_infinity(&ref)); } /* Test adding infinity. */ if (i1 == 0) { CHECK(secp256k1_ge_is_infinity(&ge[i1])); CHECK(secp256k1_gej_is_infinity(&gej[i1])); ge_equals_gej(&ref, &gej[i2]); } if (i2 == 0) { CHECK(secp256k1_ge_is_infinity(&ge[i2])); CHECK(secp256k1_gej_is_infinity(&gej[i2])); ge_equals_gej(&ref, &gej[i1]); } } } /* Test adding all points together in random order equals infinity. 
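 * Every point in the table appears alongside its negation (plus the point at
 * infinity), so the sum collapses to the identity regardless of the order in
 * which the shuffled terms are added.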
*/ { secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY; secp256k1_gej *gej_shuffled = (secp256k1_gej *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_gej)); for (i = 0; i < 4 * runs + 1; i++) { gej_shuffled[i] = gej[i]; } for (i = 0; i < 4 * runs + 1; i++) { int swap = i + secp256k1_testrand_int(4 * runs + 1 - i); if (swap != i) { secp256k1_gej t = gej_shuffled[i]; gej_shuffled[i] = gej_shuffled[swap]; gej_shuffled[swap] = t; } } for (i = 0; i < 4 * runs + 1; i++) { secp256k1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); } CHECK(secp256k1_gej_is_infinity(&sum)); free(gej_shuffled); } /* Test batch gej -> ge conversion with and without known z ratios. */ { secp256k1_fe *zr = (secp256k1_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_fe)); secp256k1_ge *ge_set_all = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge)); for (i = 0; i < 4 * runs + 1; i++) { /* Compute gej[i + 1].z / gez[i].z (with gej[n].z taken to be 1). */ if (i < 4 * runs) { secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); } } secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); for (i = 0; i < 4 * runs + 1; i++) { secp256k1_fe s; random_fe_non_zero(&s); secp256k1_gej_rescale(&gej[i], &s); ge_equals_gej(&ge_set_all[i], &gej[i]); } free(ge_set_all); free(zr); } /* Test batch gej -> ge conversion with many infinities. */ for (i = 0; i < 4 * runs + 1; i++) { random_group_element_test(&ge[i]); /* randomly set half the points to infinity */ if(secp256k1_fe_is_odd(&ge[i].x)) { secp256k1_ge_set_infinity(&ge[i]); } secp256k1_gej_set_ge(&gej[i], &ge[i]); } /* batch invert */ secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { ge_equals_gej(&ge[i], &gej[i]); } free(ge); free(gej); free(zinv); } void test_intialized_inf(void) { secp256k1_ge p; secp256k1_gej pj, npj, infj1, infj2, infj3; secp256k1_fe zinv; /* Test that adding P+(-P) results in a fully initalized infinity*/ random_group_element_test(&p); secp256k1_gej_set_ge(&pj, &p); secp256k1_gej_neg(&npj, &pj); secp256k1_gej_add_var(&infj1, &pj, &npj, NULL); CHECK(secp256k1_gej_is_infinity(&infj1)); CHECK(secp256k1_fe_is_zero(&infj1.x)); CHECK(secp256k1_fe_is_zero(&infj1.y)); CHECK(secp256k1_fe_is_zero(&infj1.z)); secp256k1_gej_add_ge_var(&infj2, &npj, &p, NULL); CHECK(secp256k1_gej_is_infinity(&infj2)); CHECK(secp256k1_fe_is_zero(&infj2.x)); CHECK(secp256k1_fe_is_zero(&infj2.y)); CHECK(secp256k1_fe_is_zero(&infj2.z)); secp256k1_fe_set_int(&zinv, 1); secp256k1_gej_add_zinv_var(&infj3, &npj, &p, &zinv); CHECK(secp256k1_gej_is_infinity(&infj3)); CHECK(secp256k1_fe_is_zero(&infj3.x)); CHECK(secp256k1_fe_is_zero(&infj3.y)); CHECK(secp256k1_fe_is_zero(&infj3.z)); } void test_add_neg_y_diff_x(void) { /* The point of this test is to check that we can add two points * whose y-coordinates are negatives of each other but whose x * coordinates differ. If the x-coordinates were the same, these * points would be negatives of each other and their sum is * infinity. This is cool because it "covers up" any degeneracy * in the addition algorithm that would cause the xy coordinates * of the sum to be wrong (since infinity has no xy coordinates). * HOWEVER, if the x-coordinates are different, infinity is the * wrong answer, and such degeneracies are exposed. This is the * root of https://github.com/bitcoin-core/secp256k1/issues/257 * which this test is a regression test for. 
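 * Here Q = -lambda*P: the endomorphism maps (x, y) to (beta*x, y), so Q has
 * y-coordinate -y_P but a different x-coordinate beta*x_P.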
* * These points were generated in sage as * # secp256k1 params * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) * C = EllipticCurve ([F (0), F (7)]) * G = C.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798) * N = FiniteField(G.order()) * * # endomorphism values (lambda is 1^{1/3} in N, beta is 1^{1/3} in F) * x = polygen(N) * lam = (1 - x^3).roots()[1][0] * * # random "bad pair" * P = C.random_element() * Q = -int(lam) * P * print " P: %x %x" % P.xy() * print " Q: %x %x" % Q.xy() * print "P + Q: %x %x" % (P + Q).xy() */ secp256k1_gej aj = SECP256K1_GEJ_CONST( 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30, 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb, 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8, 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d ); secp256k1_gej bj = SECP256K1_GEJ_CONST( 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86, 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7, 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57, 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2 ); secp256k1_gej sumj = SECP256K1_GEJ_CONST( 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027, 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a, 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08, 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe ); secp256k1_ge b; secp256k1_gej resj; secp256k1_ge res; secp256k1_ge_set_gej(&b, &bj); secp256k1_gej_add_var(&resj, &aj, &bj, NULL); secp256k1_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); secp256k1_gej_add_ge(&resj, &aj, &b); secp256k1_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); secp256k1_gej_add_ge_var(&resj, &aj, &b, NULL); secp256k1_ge_set_gej(&res, &resj); ge_equals_gej(&res, &sumj); } void run_ge(void) { int i; for (i = 0; i < count * 32; i++) { test_ge(); } test_add_neg_y_diff_x(); test_intialized_inf(); } void test_ec_combine(void) { secp256k1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); secp256k1_pubkey data[6]; const secp256k1_pubkey* d[6]; secp256k1_pubkey sd; secp256k1_pubkey sd2; secp256k1_gej Qj; secp256k1_ge Q; int i; for (i = 1; i <= 6; i++) { secp256k1_scalar s; random_scalar_order_test(&s); secp256k1_scalar_add(&sum, &sum, &s); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); secp256k1_ge_set_gej(&Q, &Qj); secp256k1_pubkey_save(&data[i - 1], &Q); d[i - 1] = &data[i - 1]; secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); secp256k1_ge_set_gej(&Q, &Qj); secp256k1_pubkey_save(&sd, &Q); CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1); CHECK(secp256k1_memcmp_var(&sd, &sd2, sizeof(sd)) == 0); } } void run_ec_combine(void) { int i; for (i = 0; i < count * 8; i++) { test_ec_combine(); } } void test_group_decompress(const secp256k1_fe* x) { /* The input itself, normalized. */ secp256k1_fe fex = *x; secp256k1_fe fez; /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */ secp256k1_ge ge_quad, ge_even, ge_odd; secp256k1_gej gej_quad; /* Return values of the above calls. */ int res_quad, res_even, res_odd; secp256k1_fe_normalize_var(&fex); res_quad = secp256k1_ge_set_xquad(&ge_quad, &fex); res_even = secp256k1_ge_set_xo_var(&ge_even, &fex, 0); res_odd = secp256k1_ge_set_xo_var(&ge_odd, &fex, 1); CHECK(res_quad == res_even); CHECK(res_quad == res_odd); if (res_quad) { secp256k1_fe_normalize_var(&ge_quad.x); secp256k1_fe_normalize_var(&ge_odd.x); secp256k1_fe_normalize_var(&ge_even.x); secp256k1_fe_normalize_var(&ge_quad.y); secp256k1_fe_normalize_var(&ge_odd.y); secp256k1_fe_normalize_var(&ge_even.y); /* No infinity allowed. 
*/ CHECK(!ge_quad.infinity); CHECK(!ge_even.infinity); CHECK(!ge_odd.infinity); /* Check that the x coordinates check out. */ CHECK(secp256k1_fe_equal_var(&ge_quad.x, x)); CHECK(secp256k1_fe_equal_var(&ge_even.x, x)); CHECK(secp256k1_fe_equal_var(&ge_odd.x, x)); /* Check that the Y coordinate result in ge_quad is a square. */ CHECK(secp256k1_fe_is_quad_var(&ge_quad.y)); /* Check odd/even Y in ge_odd, ge_even. */ CHECK(secp256k1_fe_is_odd(&ge_odd.y)); CHECK(!secp256k1_fe_is_odd(&ge_even.y)); /* Check secp256k1_gej_has_quad_y_var. */ secp256k1_gej_set_ge(&gej_quad, &ge_quad); CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); do { random_fe_test(&fez); } while (secp256k1_fe_is_zero(&fez)); secp256k1_gej_rescale(&gej_quad, &fez); CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); secp256k1_gej_neg(&gej_quad, &gej_quad); CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); do { random_fe_test(&fez); } while (secp256k1_fe_is_zero(&fez)); secp256k1_gej_rescale(&gej_quad, &fez); CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); secp256k1_gej_neg(&gej_quad, &gej_quad); CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); } } void run_group_decompress(void) { int i; for (i = 0; i < count * 4; i++) { secp256k1_fe fe; random_fe_test(&fe); test_group_decompress(&fe); } } /***** ECMULT TESTS *****/ void run_ecmult_chain(void) { /* random starting point A (on the curve) */ secp256k1_gej a = SECP256K1_GEJ_CONST( 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3, 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004, 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f, 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f ); /* two random initial factors xn and gn */ secp256k1_scalar xn = SECP256K1_SCALAR_CONST( 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c, 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407 ); secp256k1_scalar gn = SECP256K1_SCALAR_CONST( 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9, 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de ); /* two small multipliers to be applied to xn and gn in every iteration: */ static const secp256k1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); static const secp256k1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); /* accumulators with the resulting coefficients to A and G */ secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* actual points */ secp256k1_gej x; secp256k1_gej x2; int i; /* the point being computed */ x = a; for (i = 0; i < 200*count; i++) { /* in each iteration, compute X = xn*X + gn*G; */ secp256k1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn); /* also compute ae and ge: the actual accumulated factors for A and G */ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ secp256k1_scalar_mul(&ae, &ae, &xn); secp256k1_scalar_mul(&ge, &ge, &xn); secp256k1_scalar_add(&ge, &ge, &gn); /* modify xn and gn */ secp256k1_scalar_mul(&xn, &xn, &xf); secp256k1_scalar_mul(&gn, &gn, &gf); /* verify */ if (i == 19999) { /* expected result after 19999 iterations */ secp256k1_gej rp = SECP256K1_GEJ_CONST( 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE, 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830, 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D, 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88 ); secp256k1_gej_neg(&rp, &rp); secp256k1_gej_add_var(&rp, &rp, &x, NULL); CHECK(secp256k1_gej_is_infinity(&rp)); } } /* redo the computation, but directly with the resulting ae and ge coefficients: */ secp256k1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge); 
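    /* x2 = ae*A + ge*G recomputes the final chain value in a single
     * multiplication; the subtraction below must yield the point at infinity
     * if it matches the iterated result x. */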
secp256k1_gej_neg(&x2, &x2); secp256k1_gej_add_var(&x2, &x2, &x, NULL); CHECK(secp256k1_gej_is_infinity(&x2)); } void test_point_times_order(const secp256k1_gej *point) { /* X * (point + G) + (order-X) * (pointer + G) = 0 */ secp256k1_scalar x; secp256k1_scalar nx; secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); secp256k1_gej res1, res2; secp256k1_ge res3; unsigned char pub[65]; size_t psize = 65; random_scalar_order_test(&x); secp256k1_scalar_negate(&nx, &x); secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */ secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ secp256k1_gej_add_var(&res1, &res1, &res2, NULL); CHECK(secp256k1_gej_is_infinity(&res1)); secp256k1_ge_set_gej(&res3, &res1); CHECK(secp256k1_ge_is_infinity(&res3)); CHECK(secp256k1_ge_is_valid_var(&res3) == 0); CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); psize = 65; CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); /* check zero/one edge cases */ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero); secp256k1_ge_set_gej(&res3, &res1); CHECK(secp256k1_ge_is_infinity(&res3)); secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero); secp256k1_ge_set_gej(&res3, &res1); ge_equals_gej(&res3, point); secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one); secp256k1_ge_set_gej(&res3, &res1); ge_equals_ge(&res3, &secp256k1_ge_const_g); } /* These scalars reach large (in absolute value) outputs when fed to secp256k1_scalar_split_lambda. * * They are computed as: * - For a in [-2, -1, 0, 1, 2]: * - For b in [-3, -1, 1, 3]: * - Output (a*LAMBDA + (ORDER+b)/2) % ORDER */ static const secp256k1_scalar scalars_near_split_bounds[20] = { SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fc), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fd), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6fe), SECP256K1_SCALAR_CONST(0xd938a566, 0x7f479e3e, 0xb5b3c7fa, 0xefdb3749, 0x3aa0585c, 0xc5ea2367, 0xe1b660db, 0x0209e6ff), SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632d), SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632e), SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf7632f), SECP256K1_SCALAR_CONST(0x2c9c52b3, 0x3fa3cf1f, 0x5ad9e3fd, 0x77ed9ba5, 0xb294b893, 0x3722e9a5, 0x00e698ca, 0x4cf76330), SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b209f), SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a0), SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a1), SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xd576e735, 0x57a4501d, 0xdfe92f46, 0x681b20a2), SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede11), SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 
0x833ede12), SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede13), SECP256K1_SCALAR_CONST(0xd363ad4c, 0xc05c30e0, 0xa5261c02, 0x88126459, 0xf85915d7, 0x7825b696, 0xbeebc5c2, 0x833ede14), SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a42), SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a43), SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a44), SECP256K1_SCALAR_CONST(0x26c75a99, 0x80b861c1, 0x4a4c3805, 0x1024c8b4, 0x704d760e, 0xe95e7cd3, 0xde1bfdb1, 0xce2c5a45) }; void test_ecmult_target(const secp256k1_scalar* target, int mode) { /* Mode: 0=ecmult_gen, 1=ecmult, 2=ecmult_const */ secp256k1_scalar n1, n2; secp256k1_ge p; secp256k1_gej pj, p1j, p2j, ptj; static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); /* Generate random n1,n2 such that n1+n2 = -target. */ random_scalar_order_test(&n1); secp256k1_scalar_add(&n2, &n1, target); secp256k1_scalar_negate(&n2, &n2); /* Generate a random input point. */ if (mode != 0) { random_group_element_test(&p); secp256k1_gej_set_ge(&pj, &p); } /* EC multiplications */ if (mode == 0) { secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p1j, &n1); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &p2j, &n2); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &ptj, target); } else if (mode == 1) { secp256k1_ecmult(&ctx->ecmult_ctx, &p1j, &pj, &n1, &zero); secp256k1_ecmult(&ctx->ecmult_ctx, &p2j, &pj, &n2, &zero); secp256k1_ecmult(&ctx->ecmult_ctx, &ptj, &pj, target, &zero); } else { secp256k1_ecmult_const(&p1j, &p, &n1, 256); secp256k1_ecmult_const(&p2j, &p, &n2, 256); secp256k1_ecmult_const(&ptj, &p, target, 256); } /* Add them all up: n1*P + n2*P + target*P = (n1+n2+target)*P = (n1+n1-n1-n2)*P = 0. 
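     * (n2 was generated above so that n1 + n2 = -target, hence the combined
     * coefficient n1 + n2 + target is zero.)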
*/ secp256k1_gej_add_var(&ptj, &ptj, &p1j, NULL); secp256k1_gej_add_var(&ptj, &ptj, &p2j, NULL); CHECK(secp256k1_gej_is_infinity(&ptj)); } void run_ecmult_near_split_bound(void) { int i; unsigned j; for (i = 0; i < 4*count; ++i) { for (j = 0; j < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++j) { test_ecmult_target(&scalars_near_split_bounds[j], 0); test_ecmult_target(&scalars_near_split_bounds[j], 1); test_ecmult_target(&scalars_near_split_bounds[j], 2); } } } void run_point_times_order(void) { int i; secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); static const secp256k1_fe xr = SECP256K1_FE_CONST( 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C, 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45 ); for (i = 0; i < 500; i++) { secp256k1_ge p; if (secp256k1_ge_set_xo_var(&p, &x, 1)) { secp256k1_gej j; CHECK(secp256k1_ge_is_valid_var(&p)); secp256k1_gej_set_ge(&j, &p); test_point_times_order(&j); } secp256k1_fe_sqr(&x, &x); } secp256k1_fe_normalize_var(&x); CHECK(secp256k1_fe_equal_var(&x, &xr)); } void ecmult_const_random_mult(void) { /* random starting point A (on the curve) */ secp256k1_ge a = SECP256K1_GE_CONST( 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d ); /* random initial factor xn */ secp256k1_scalar xn = SECP256K1_SCALAR_CONST( 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b ); /* expected xn * A (from sage) */ secp256k1_ge expected_b = SECP256K1_GE_CONST( 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956 ); secp256k1_gej b; secp256k1_ecmult_const(&b, &a, &xn, 256); CHECK(secp256k1_ge_is_valid_var(&a)); ge_equals_gej(&expected_b, &b); } void ecmult_const_commutativity(void) { secp256k1_scalar a; secp256k1_scalar b; secp256k1_gej res1; secp256k1_gej res2; secp256k1_ge mid1; secp256k1_ge mid2; random_scalar_order_test(&a); random_scalar_order_test(&b); secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a, 256); secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b, 256); secp256k1_ge_set_gej(&mid1, &res1); secp256k1_ge_set_gej(&mid2, &res2); secp256k1_ecmult_const(&res1, &mid1, &b, 256); secp256k1_ecmult_const(&res2, &mid2, &a, 256); secp256k1_ge_set_gej(&mid1, &res1); secp256k1_ge_set_gej(&mid2, &res2); ge_equals_ge(&mid1, &mid2); } void ecmult_const_mult_zero_one(void) { secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); secp256k1_scalar negone; secp256k1_gej res1; secp256k1_ge res2; secp256k1_ge point; secp256k1_scalar_negate(&negone, &one); random_group_element_test(&point); secp256k1_ecmult_const(&res1, &point, &zero, 3); secp256k1_ge_set_gej(&res2, &res1); CHECK(secp256k1_ge_is_infinity(&res2)); secp256k1_ecmult_const(&res1, &point, &one, 2); secp256k1_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); secp256k1_ecmult_const(&res1, &point, &negone, 256); secp256k1_gej_neg(&res1, &res1); secp256k1_ge_set_gej(&res2, &res1); ge_equals_ge(&res2, &point); } void ecmult_const_chain_multiply(void) { /* Check known result (randomly generated test problem from sage) */ const secp256k1_scalar scalar = SECP256K1_SCALAR_CONST( 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b ); 
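    /* The loop below repeatedly replaces point with scalar*point, starting
     * from G, so after 100 iterations the result must equal scalar^100 * G,
     * which expected_point records (generated in sage, per the comment
     * above). */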
const secp256k1_gej expected_point = SECP256K1_GEJ_CONST( 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 ); secp256k1_gej point; secp256k1_ge res; int i; secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g); for (i = 0; i < 100; ++i) { secp256k1_ge tmp; secp256k1_ge_set_gej(&tmp, &point); secp256k1_ecmult_const(&point, &tmp, &scalar, 256); } secp256k1_ge_set_gej(&res, &point); ge_equals_gej(&res, &expected_point); } void run_ecmult_const_tests(void) { ecmult_const_mult_zero_one(); ecmult_const_random_mult(); ecmult_const_commutativity(); ecmult_const_chain_multiply(); } typedef struct { secp256k1_scalar *sc; secp256k1_ge *pt; } ecmult_multi_data; static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } static int ecmult_multi_false_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) { (void)sc; (void)pt; (void)idx; (void)cbdata; return 0; } void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func ecmult_multi) { int ncount; secp256k1_scalar szero; secp256k1_scalar sc[32]; secp256k1_ge pt[32]; secp256k1_gej r; secp256k1_gej r2; ecmult_multi_data data; data.sc = sc; data.pt = pt; secp256k1_scalar_set_int(&szero, 0); /* No points to multiply */ CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0)); /* Check 1- and 2-point multiplies against ecmult */ for (ncount = 0; ncount < count; ncount++) { secp256k1_ge ptg; secp256k1_gej ptgj; random_scalar_order(&sc[0]); random_scalar_order(&sc[1]); random_group_element_test(&ptg); secp256k1_gej_set_ge(&ptgj, &ptg); pt[0] = ptg; pt[1] = secp256k1_ge_const_g; /* only G scalar */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); /* 1-point */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); /* Try to multiply 1 point, but callback returns false */ CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1)); /* 2-point */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); /* 2-point with G scalar */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); } /* Check infinite outputs of various forms */ for (ncount = 0; ncount < count; ncount++) { secp256k1_ge ptg; size_t i, j; size_t sizes[] = { 2, 10, 32 }; for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { 
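                /* Pair random scalars with the point at infinity; the
                 * multi-multiplication must then return infinity. */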
random_scalar_order(&sc[i]); secp256k1_ge_set_infinity(&pt[i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { for (i = 0; i < 32; i++) { random_group_element_test(&ptg); pt[i] = ptg; secp256k1_scalar_set_int(&sc[i], 0); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); } for (j = 0; j < 3; j++) { random_group_element_test(&ptg); for (i = 0; i < 16; i++) { random_scalar_order(&sc[2*i]); secp256k1_scalar_negate(&sc[2*i + 1], &sc[2*i]); pt[2 * i] = ptg; pt[2 * i + 1] = ptg; } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); random_scalar_order(&sc[0]); for (i = 0; i < 16; i++) { random_group_element_test(&ptg); sc[2*i] = sc[0]; sc[2*i+1] = sc[0]; pt[2 * i] = ptg; secp256k1_ge_neg(&pt[2*i+1], &pt[2*i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); } random_group_element_test(&ptg); secp256k1_scalar_set_int(&sc[0], 0); pt[0] = ptg; for (i = 1; i < 32; i++) { pt[i] = ptg; random_scalar_order(&sc[i]); secp256k1_scalar_add(&sc[0], &sc[0], &sc[i]); secp256k1_scalar_negate(&sc[i], &sc[i]); } CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32)); CHECK(secp256k1_gej_is_infinity(&r)); } /* Check random points, constant scalar */ for (ncount = 0; ncount < count; ncount++) { size_t i; secp256k1_gej_set_infinity(&r); random_scalar_order(&sc[0]); for (i = 0; i < 20; i++) { secp256k1_ge ptg; sc[i] = sc[0]; random_group_element_test(&ptg); pt[i] = ptg; secp256k1_gej_add_ge_var(&r, &r, &pt[i], NULL); } secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); } /* Check random scalars, constant point */ for (ncount = 0; ncount < count; ncount++) { size_t i; secp256k1_ge ptg; secp256k1_gej p0j; secp256k1_scalar rs; secp256k1_scalar_set_int(&rs, 0); random_group_element_test(&ptg); for (i = 0; i < 20; i++) { random_scalar_order(&sc[i]); pt[i] = ptg; secp256k1_scalar_add(&rs, &rs, &sc[i]); } secp256k1_gej_set_ge(&p0j, &pt[0]); secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); } /* Sanity check that zero scalars don't cause problems */ for (ncount = 0; ncount < 20; ncount++) { random_scalar_order(&sc[ncount]); random_group_element_test(&pt[ncount]); } secp256k1_scalar_clear(&sc[0]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); secp256k1_scalar_clear(&sc[1]); secp256k1_scalar_clear(&sc[2]); secp256k1_scalar_clear(&sc[3]); secp256k1_scalar_clear(&sc[4]); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6)); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, 
ecmult_multi_callback, &data, 5)); CHECK(secp256k1_gej_is_infinity(&r)); /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */ { const size_t TOP = 8; size_t s0i, s1i; size_t t0i, t1i; secp256k1_ge ptg; secp256k1_gej ptgj; random_group_element_test(&ptg); secp256k1_gej_set_ge(&ptgj, &ptg); for(t0i = 0; t0i < TOP; t0i++) { for(t1i = 0; t1i < TOP; t1i++) { secp256k1_gej t0p, t1p; secp256k1_scalar t0, t1; secp256k1_scalar_set_int(&t0, (t0i + 1) / 2); secp256k1_scalar_cond_negate(&t0, t0i & 1); secp256k1_scalar_set_int(&t1, (t1i + 1) / 2); secp256k1_scalar_cond_negate(&t1, t1i & 1); secp256k1_ecmult(&ctx->ecmult_ctx, &t0p, &ptgj, &t0, &szero); secp256k1_ecmult(&ctx->ecmult_ctx, &t1p, &ptgj, &t1, &szero); for(s0i = 0; s0i < TOP; s0i++) { for(s1i = 0; s1i < TOP; s1i++) { secp256k1_scalar tmp1, tmp2; secp256k1_gej expected, actual; secp256k1_ge_set_gej(&pt[0], &t0p); secp256k1_ge_set_gej(&pt[1], &t1p); secp256k1_scalar_set_int(&sc[0], (s0i + 1) / 2); secp256k1_scalar_cond_negate(&sc[0], s0i & 1); secp256k1_scalar_set_int(&sc[1], (s1i + 1) / 2); secp256k1_scalar_cond_negate(&sc[1], s1i & 1); secp256k1_scalar_mul(&tmp1, &t0, &sc[0]); secp256k1_scalar_mul(&tmp2, &t1, &sc[1]); secp256k1_scalar_add(&tmp1, &tmp1, &tmp2); secp256k1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero); CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2)); secp256k1_gej_neg(&expected, &expected); secp256k1_gej_add_var(&actual, &actual, &expected, NULL); CHECK(secp256k1_gej_is_infinity(&actual)); } } } } } } void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_multi) { secp256k1_scalar szero; secp256k1_scalar sc; secp256k1_ge pt; secp256k1_gej r; ecmult_multi_data data; secp256k1_scratch *scratch_empty; random_group_element_test(&pt); random_scalar_order(&sc); data.sc = ≻ data.pt = &pt; secp256k1_scalar_set_int(&szero, 0); /* Try to multiply 1 point, but scratch space is empty.*/ scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0); CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1)); secp256k1_scratch_destroy(&ctx->error_callback, scratch_empty); } void test_secp256k1_pippenger_bucket_window_inv(void) { int i; CHECK(secp256k1_pippenger_bucket_window_inv(0) == 0); for(i = 1; i <= PIPPENGER_MAX_BUCKET_WINDOW; i++) { -#ifdef USE_ENDOMORPHISM /* Bucket_window of 8 is not used with endo */ if (i == 8) { continue; } -#endif CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)) == i); if (i != PIPPENGER_MAX_BUCKET_WINDOW) { CHECK(secp256k1_pippenger_bucket_window(secp256k1_pippenger_bucket_window_inv(i)+1) > i); } } } /** * Probabilistically test the function returning the maximum number of possible points * for a given scratch space. 
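 * For each candidate scratch size, the point count returned by
 * secp256k1_pippenger_max_points must actually fit: the test replays the
 * allocation pattern (PIPPENGER_SCRATCH_OBJECTS allocations summing to the
 * computed scratch size) and checks that every allocation succeeds.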
*/ void test_ecmult_multi_pippenger_max_points(void) { size_t scratch_size = secp256k1_testrand_int(256); size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12); secp256k1_scratch *scratch; size_t n_points_supported; int bucket_window = 0; for(; scratch_size < max_size; scratch_size+=256) { size_t i; size_t total_alloc; size_t checkpoint; scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size); CHECK(scratch != NULL); checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch); n_points_supported = secp256k1_pippenger_max_points(&ctx->error_callback, scratch); if (n_points_supported == 0) { secp256k1_scratch_destroy(&ctx->error_callback, scratch); continue; } bucket_window = secp256k1_pippenger_bucket_window(n_points_supported); /* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */ total_alloc = secp256k1_pippenger_scratch_size(n_points_supported, bucket_window); for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) { CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, 1)); total_alloc--; } CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); secp256k1_scratch_destroy(&ctx->error_callback, scratch); } CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW); } void test_ecmult_multi_batch_size_helper(void) { size_t n_batches, n_batch_points, max_n_batch_points, n; max_n_batch_points = 0; n = 1; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 0); max_n_batch_points = 1; n = 0; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 0); CHECK(n_batch_points == 0); max_n_batch_points = 2; n = 5; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 3); CHECK(n_batch_points == 2); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; n = ECMULT_MAX_POINTS_PER_BATCH; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 1); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH); max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1; n = ECMULT_MAX_POINTS_PER_BATCH + 1; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == 2); CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1); max_n_batch_points = 1; n = SIZE_MAX; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX); CHECK(n_batch_points == 1); max_n_batch_points = 2; n = SIZE_MAX; CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); CHECK(n_batches == SIZE_MAX/2 + 1); CHECK(n_batch_points == 2); } /** * Run secp256k1_ecmult_multi_var with num points and a scratch space restricted to * 1 <= i <= num points. 
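 * This exercises both batching and algorithm selection: scratch spaces too
 * small for Strauss or Pippenger must make ecmult_multi fall back to the
 * simple per-point algorithm, which needs no scratch space at all.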
*/ void test_ecmult_multi_batching(void) { static const int n_points = 2*ECMULT_PIPPENGER_THRESHOLD; secp256k1_scalar scG; secp256k1_scalar szero; secp256k1_scalar *sc = (secp256k1_scalar *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_scalar) * n_points); secp256k1_ge *pt = (secp256k1_ge *)checked_malloc(&ctx->error_callback, sizeof(secp256k1_ge) * n_points); secp256k1_gej r; secp256k1_gej r2; ecmult_multi_data data; int i; secp256k1_scratch *scratch; secp256k1_gej_set_infinity(&r2); secp256k1_scalar_set_int(&szero, 0); /* Get random scalars and group elements and compute result */ random_scalar_order(&scG); secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r2, &szero, &scG); for(i = 0; i < n_points; i++) { secp256k1_ge ptg; secp256k1_gej ptgj; random_group_element_test(&ptg); secp256k1_gej_set_ge(&ptgj, &ptg); pt[i] = ptg; random_scalar_order(&sc[i]); secp256k1_ecmult(&ctx->ecmult_ctx, &ptgj, &ptgj, &sc[i], NULL); secp256k1_gej_add_var(&r2, &r2, &ptgj, NULL); } data.sc = sc; data.pt = pt; secp256k1_gej_neg(&r2, &r2); /* Test with empty scratch space. It should compute the correct result using * ecmult_mult_simple algorithm which doesn't require a scratch space. */ scratch = secp256k1_scratch_create(&ctx->error_callback, 0); CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); secp256k1_scratch_destroy(&ctx->error_callback, scratch); /* Test with space for 1 point in pippenger. That's not enough because * ecmult_multi selects strauss which requires more memory. It should * therefore select the simple algorithm. */ scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); secp256k1_scratch_destroy(&ctx->error_callback, scratch); for(i = 1; i <= n_points; i++) { if (i > ECMULT_PIPPENGER_THRESHOLD) { int bucket_window = secp256k1_pippenger_bucket_window(i); size_t scratch_size = secp256k1_pippenger_scratch_size(i, bucket_window); scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); } else { size_t scratch_size = secp256k1_strauss_scratch_size(i); scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); } CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); secp256k1_scratch_destroy(&ctx->error_callback, scratch); } free(sc); free(pt); } void run_ecmult_multi_tests(void) { secp256k1_scratch *scratch; test_secp256k1_pippenger_bucket_window_inv(); test_ecmult_multi_pippenger_max_points(); scratch = secp256k1_scratch_create(&ctx->error_callback, 819200); test_ecmult_multi(scratch, secp256k1_ecmult_multi_var); test_ecmult_multi(NULL, secp256k1_ecmult_multi_var); test_ecmult_multi(scratch, secp256k1_ecmult_pippenger_batch_single); test_ecmult_multi_batch_single(secp256k1_ecmult_pippenger_batch_single); test_ecmult_multi(scratch, secp256k1_ecmult_strauss_batch_single); test_ecmult_multi_batch_single(secp256k1_ecmult_strauss_batch_single); secp256k1_scratch_destroy(&ctx->error_callback, 
scratch); /* Run test_ecmult_multi with space for exactly one point */ scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); test_ecmult_multi(scratch, secp256k1_ecmult_multi_var); secp256k1_scratch_destroy(&ctx->error_callback, scratch); test_ecmult_multi_batch_size_helper(); test_ecmult_multi_batching(); } void test_wnaf(const secp256k1_scalar *number, int w) { secp256k1_scalar x, two, t; int wnaf[256]; int zeroes = -1; int i; int bits; secp256k1_scalar_set_int(&x, 0); secp256k1_scalar_set_int(&two, 2); bits = secp256k1_ecmult_wnaf(wnaf, 256, number, w); CHECK(bits <= 256); for (i = bits-1; i >= 0; i--) { int v = wnaf[i]; secp256k1_scalar_mul(&x, &x, &two); if (v) { CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ zeroes=0; CHECK((v & 1) == 1); /* check non-zero elements are odd */ CHECK(v <= (1 << (w-1)) - 1); /* check range below */ CHECK(v >= -(1 << (w-1)) - 1); /* check range above */ } else { CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */ zeroes++; } if (v >= 0) { secp256k1_scalar_set_int(&t, v); } else { secp256k1_scalar_set_int(&t, -v); secp256k1_scalar_negate(&t, &t); } secp256k1_scalar_add(&x, &x, &t); } CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */ } void test_constant_wnaf_negate(const secp256k1_scalar *number) { secp256k1_scalar neg1 = *number; secp256k1_scalar neg2 = *number; int sign1 = 1; int sign2 = 1; if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) { secp256k1_scalar_negate(&neg1, &neg1); sign1 = -1; } sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2)); CHECK(sign1 == sign2); CHECK(secp256k1_scalar_eq(&neg1, &neg2)); } void test_constant_wnaf(const secp256k1_scalar *number, int w) { secp256k1_scalar x, shift; int wnaf[256] = {0}; int i; int skew; int bits = 256; secp256k1_scalar num = *number; secp256k1_scalar scalar_skew; secp256k1_scalar_set_int(&x, 0); secp256k1_scalar_set_int(&shift, 1 << w); - /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ -#ifdef USE_ENDOMORPHISM for (i = 0; i < 16; ++i) { secp256k1_scalar_shr_int(&num, 8); } bits = 128; -#endif skew = secp256k1_wnaf_const(wnaf, &num, w, bits); for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { secp256k1_scalar t; int v = wnaf[i]; CHECK(v != 0); /* check nonzero */ CHECK(v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ CHECK(v < (1 << w)); /* check range below */ secp256k1_scalar_mul(&x, &x, &shift); if (v >= 0) { secp256k1_scalar_set_int(&t, v); } else { secp256k1_scalar_set_int(&t, -v); secp256k1_scalar_negate(&t, &t); } secp256k1_scalar_add(&x, &x, &t); } /* Skew num because when encoding numbers as odd we use an offset */ secp256k1_scalar_set_int(&scalar_skew, 1 << (skew == 2)); secp256k1_scalar_add(&num, &num, &scalar_skew); CHECK(secp256k1_scalar_eq(&x, &num)); } void test_fixed_wnaf(const secp256k1_scalar *number, int w) { secp256k1_scalar x, shift; int wnaf[256] = {0}; int i; int skew; secp256k1_scalar num = *number; secp256k1_scalar_set_int(&x, 0); secp256k1_scalar_set_int(&shift, 1 << w); - /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ -#ifdef USE_ENDOMORPHISM for (i = 0; i < 16; ++i) { secp256k1_scalar_shr_int(&num, 8); } -#endif skew = secp256k1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { secp256k1_scalar t; int v = wnaf[i]; CHECK(v == 0 || v & 1); /* check parity */ CHECK(v > -(1 << w)); /* check range above */ 
CHECK(v < (1 << w)); /* check range below */ secp256k1_scalar_mul(&x, &x, &shift); if (v >= 0) { secp256k1_scalar_set_int(&t, v); } else { secp256k1_scalar_set_int(&t, -v); secp256k1_scalar_negate(&t, &t); } secp256k1_scalar_add(&x, &x, &t); } /* If skew is 1 then add 1 to num */ secp256k1_scalar_cadd_bit(&num, 0, skew == 1); CHECK(secp256k1_scalar_eq(&x, &num)); } /* Checks that the first 8 elements of wnaf are equal to wnaf_expected and the * rest is 0.*/ void test_fixed_wnaf_small_helper(int *wnaf, int *wnaf_expected, int w) { int i; for (i = WNAF_SIZE(w)-1; i >= 8; --i) { CHECK(wnaf[i] == 0); } for (i = 7; i >= 0; --i) { CHECK(wnaf[i] == wnaf_expected[i]); } } void test_fixed_wnaf_small(void) { int w = 4; int wnaf[256] = {0}; int i; int skew; secp256k1_scalar num; secp256k1_scalar_set_int(&num, 0); skew = secp256k1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 0; --i) { int v = wnaf[i]; CHECK(v == 0); } CHECK(skew == 0); secp256k1_scalar_set_int(&num, 1); skew = secp256k1_wnaf_fixed(wnaf, &num, w); for (i = WNAF_SIZE(w)-1; i >= 1; --i) { int v = wnaf[i]; CHECK(v == 0); } CHECK(wnaf[0] == 1); CHECK(skew == 0); { int wnaf_expected[8] = { 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf }; secp256k1_scalar_set_int(&num, 0xffffffff); skew = secp256k1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -1, -1, -1, -1, -1, -1, -1, 0xf }; secp256k1_scalar_set_int(&num, 0xeeeeeeee); skew = secp256k1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 1); } { int wnaf_expected[8] = { 1, 0, 1, 0, 1, 0, 1, 0 }; secp256k1_scalar_set_int(&num, 0x01010101); skew = secp256k1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } { int wnaf_expected[8] = { -0xf, 0, 0xf, -0xf, 0, 0xf, 1, 0 }; secp256k1_scalar_set_int(&num, 0x01ef1ef1); skew = secp256k1_wnaf_fixed(wnaf, &num, w); test_fixed_wnaf_small_helper(wnaf, wnaf_expected, w); CHECK(skew == 0); } } void run_wnaf(void) { int i; secp256k1_scalar n = {{0}}; test_constant_wnaf(&n, 4); /* Sanity check: 1 and 2 are the smallest odd and even numbers and should * have easier-to-diagnose failure modes */ n.d[0] = 1; test_constant_wnaf(&n, 4); n.d[0] = 2; test_constant_wnaf(&n, 4); /* Test -1, because it's a special case in wnaf_const */ n = secp256k1_scalar_one; secp256k1_scalar_negate(&n, &n); test_constant_wnaf(&n, 4); /* Test -2, which may not lead to overflows in wnaf_const */ secp256k1_scalar_add(&n, &secp256k1_scalar_one, &secp256k1_scalar_one); secp256k1_scalar_negate(&n, &n); test_constant_wnaf(&n, 4); /* Test (1/2) - 1 = 1/-2 and 1/2 = (1/-2) + 1 as corner cases of negation handling in wnaf_const */ secp256k1_scalar_inverse(&n, &n); test_constant_wnaf(&n, 4); secp256k1_scalar_add(&n, &n, &secp256k1_scalar_one); test_constant_wnaf(&n, 4); /* Test 0 for fixed wnaf */ test_fixed_wnaf_small(); /* Random tests */ for (i = 0; i < count; i++) { random_scalar_order(&n); test_wnaf(&n, 4+(i%10)); test_constant_wnaf_negate(&n); test_constant_wnaf(&n, 4 + (i % 10)); test_fixed_wnaf(&n, 4 + (i % 10)); } secp256k1_scalar_set_int(&n, 0); CHECK(secp256k1_scalar_cond_negate(&n, 1) == -1); CHECK(secp256k1_scalar_is_zero(&n)); CHECK(secp256k1_scalar_cond_negate(&n, 0) == 1); CHECK(secp256k1_scalar_is_zero(&n)); } void test_ecmult_constants(void) { /* Test ecmult_gen() for [0..36) and [order-36..0). 
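 * For each scalar i the loop computes i*G via ecmult_gen and then adds i copies
 * of -G (or of G for the negated scalars), so the CHECK amounts to verifying
 * i*G + i*(-G) = infinity, with the running sum compared against G (resp. -G)
 * just before the last addition.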
*/ secp256k1_scalar x; secp256k1_gej r; secp256k1_ge ng; int i; int j; secp256k1_ge_neg(&ng, &secp256k1_ge_const_g); for (i = 0; i < 36; i++ ) { secp256k1_scalar_set_int(&x, i); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); for (j = 0; j < i; j++) { if (j == i - 1) { ge_equals_gej(&secp256k1_ge_const_g, &r); } secp256k1_gej_add_ge(&r, &r, &ng); } CHECK(secp256k1_gej_is_infinity(&r)); } for (i = 1; i <= 36; i++ ) { secp256k1_scalar_set_int(&x, i); secp256k1_scalar_negate(&x, &x); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); for (j = 0; j < i; j++) { if (j == i - 1) { ge_equals_gej(&ng, &r); } secp256k1_gej_add_ge(&r, &r, &secp256k1_ge_const_g); } CHECK(secp256k1_gej_is_infinity(&r)); } } void run_ecmult_constants(void) { test_ecmult_constants(); } void test_ecmult_gen_blind(void) { /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */ secp256k1_scalar key; secp256k1_scalar b; unsigned char seed32[32]; secp256k1_gej pgej; secp256k1_gej pgej2; secp256k1_gej i; secp256k1_ge pge; random_scalar_order_test(&key); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); secp256k1_testrand256(seed32); b = ctx->ecmult_gen_ctx.blind; i = ctx->ecmult_gen_ctx.initial; secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); CHECK(!secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); CHECK(!gej_xyz_equals_gej(&pgej, &pgej2)); CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial)); secp256k1_ge_set_gej(&pge, &pgej); ge_equals_gej(&pge, &pgej2); } void test_ecmult_gen_blind_reset(void) { /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */ secp256k1_scalar b; secp256k1_gej initial; secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); b = ctx->ecmult_gen_ctx.blind; initial = ctx->ecmult_gen_ctx.initial; secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); CHECK(secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial)); } void run_ecmult_gen_blind(void) { int i; test_ecmult_gen_blind_reset(); for (i = 0; i < 10; i++) { test_ecmult_gen_blind(); } } -#ifdef USE_ENDOMORPHISM /***** ENDOMORPHISM TESTS *****/ void test_scalar_split(const secp256k1_scalar* full) { secp256k1_scalar s, s1, slam; const unsigned char zero[32] = {0}; unsigned char tmp[32]; secp256k1_scalar_split_lambda(&s1, &slam, full); /* check slam*lambda + s1 == full */ secp256k1_scalar_mul(&s, &secp256k1_const_lambda, &slam); secp256k1_scalar_add(&s, &s, &s1); CHECK(secp256k1_scalar_eq(&s, full)); /* check that both are <= 128 bits in size */ if (secp256k1_scalar_is_high(&s1)) { secp256k1_scalar_negate(&s1, &s1); } if (secp256k1_scalar_is_high(&slam)) { secp256k1_scalar_negate(&slam, &slam); } secp256k1_scalar_get_b32(tmp, &s1); CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0); secp256k1_scalar_get_b32(tmp, &slam); CHECK(secp256k1_memcmp_var(zero, tmp, 16) == 0); } void run_endomorphism_tests(void) { unsigned i; static secp256k1_scalar s; test_scalar_split(&secp256k1_scalar_zero); test_scalar_split(&secp256k1_scalar_one); secp256k1_scalar_negate(&s,&secp256k1_scalar_one); test_scalar_split(&s); test_scalar_split(&secp256k1_const_lambda); secp256k1_scalar_add(&s, &secp256k1_const_lambda, &secp256k1_scalar_one); test_scalar_split(&s); for (i = 0; i < 100U * count; ++i) { secp256k1_scalar full; random_scalar_order_test(&full); test_scalar_split(&full); } for (i = 0; i < sizeof(scalars_near_split_bounds) /
sizeof(scalars_near_split_bounds[0]); ++i) { test_scalar_split(&scalars_near_split_bounds[i]); } } -#endif void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { unsigned char pubkeyc[65]; secp256k1_pubkey pubkey; secp256k1_ge ge; size_t pubkeyclen; int32_t ecount; ecount = 0; secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) { /* Smaller sizes are tested exhaustively elsewhere. */ int32_t i; memcpy(&pubkeyc[1], input, 64); VG_UNDEF(&pubkeyc[pubkeyclen], 65 - pubkeyclen); for (i = 0; i < 256; i++) { /* Try all type bytes. */ int xpass; int ypass; int ysign; pubkeyc[0] = i; /* What sign does this point have? */ ysign = (input[63] & 1) + 2; /* For the current type (i) do we expect parsing to work? Handled all of compressed/uncompressed/hybrid. */ xpass = xvalid && (pubkeyclen == 33) && ((i & 254) == 2); /* Do we expect a parse and re-serialize as uncompressed to give a matching y? */ ypass = xvalid && yvalid && ((i & 4) == ((pubkeyclen == 65) << 2)) && ((i == 4) || ((i & 251) == ysign)) && ((pubkeyclen == 33) || (pubkeyclen == 65)); if (xpass || ypass) { /* These cases must parse. */ unsigned char pubkeyo[65]; size_t outl; memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); ecount = 0; CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 33); CHECK(secp256k1_memcmp_var(&pubkeyo[1], &pubkeyc[1], 32) == 0); CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0])); if (ypass) { /* This test isn't always done because we decode with alternative signs, so the y won't match. */ CHECK(pubkeyo[0] == ysign); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); secp256k1_pubkey_save(&pubkey, &ge); VG_CHECK(&pubkey, sizeof(pubkey)); outl = 65; VG_UNDEF(pubkeyo, 65); CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(pubkeyo, outl); CHECK(outl == 65); CHECK(pubkeyo[0] == 4); CHECK(secp256k1_memcmp_var(&pubkeyo[1], input, 64) == 0); } CHECK(ecount == 0); } else { /* These cases must fail to parse. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } } } secp256k1_context_set_illegal_callback(ctx, NULL, NULL); } void run_ec_pubkey_parse_test(void) { #define SECP256K1_EC_PARSE_TEST_NVALID (12) const unsigned char valid[SECP256K1_EC_PARSE_TEST_NVALID][64] = { { /* Point with leading and trailing zeros in x and y serialization. 
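 * (x and y are fixed-width 32-byte big-endian field elements, so these zero
 * bytes are significant and must survive the parse/serialize round trip
 * unchanged.)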
*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xef, 0xa1, 0x7b, 0x77, 0x61, 0xe1, 0xe4, 0x27, 0x06, 0x98, 0x9f, 0xb4, 0x83, 0xb8, 0xd2, 0xd4, 0x9b, 0xf7, 0x8f, 0xae, 0x98, 0x03, 0xf0, 0x99, 0xb8, 0x34, 0xed, 0xeb, 0x00 }, { /* Point with x equal to a 3rd root of unity.*/ 0x7a, 0xe9, 0x6a, 0x2b, 0x65, 0x7c, 0x07, 0x10, 0x6e, 0x64, 0x47, 0x9e, 0xac, 0x34, 0x34, 0xe9, 0x9c, 0xf0, 0x49, 0x75, 0x12, 0xf5, 0x89, 0x95, 0xc1, 0x39, 0x6c, 0x28, 0x71, 0x95, 0x01, 0xee, 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14, 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee, }, { /* Point with largest x. (1/2) */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c, 0x0e, 0x99, 0x4b, 0x14, 0xea, 0x72, 0xf8, 0xc3, 0xeb, 0x95, 0xc7, 0x1e, 0xf6, 0x92, 0x57, 0x5e, 0x77, 0x50, 0x58, 0x33, 0x2d, 0x7e, 0x52, 0xd0, 0x99, 0x5c, 0xf8, 0x03, 0x88, 0x71, 0xb6, 0x7d, }, { /* Point with largest x. (2/2) */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c, 0xf1, 0x66, 0xb4, 0xeb, 0x15, 0x8d, 0x07, 0x3c, 0x14, 0x6a, 0x38, 0xe1, 0x09, 0x6d, 0xa8, 0xa1, 0x88, 0xaf, 0xa7, 0xcc, 0xd2, 0x81, 0xad, 0x2f, 0x66, 0xa3, 0x07, 0xfb, 0x77, 0x8e, 0x45, 0xb2, }, { /* Point with smallest x. (1/2) */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14, 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee, }, { /* Point with smallest x. (2/2) */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb, 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41, }, { /* Point with largest y. (1/3) */ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6, 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, }, { /* Point with largest y. (2/3) */ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c, 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, }, { /* Point with largest y. 
(3/3) */ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc, 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, }, { /* Point with smallest y. (1/3) */ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6, 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }, { /* Point with smallest y. (2/3) */ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c, 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }, { /* Point with smallest y. (3/3) */ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc, 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 } }; #define SECP256K1_EC_PARSE_TEST_NXVALID (4) const unsigned char onlyxvalid[SECP256K1_EC_PARSE_TEST_NXVALID][64] = { { /* Valid if y overflow ignored (y = 1 mod p). (1/3) */ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6, 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, }, { /* Valid if y overflow ignored (y = 1 mod p). (2/3) */ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c, 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, }, { /* Valid if y overflow ignored (y = 1 mod p). (3/3)*/ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc, 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, }, { /* x on curve, y is from y^2 = x^3 + 8. 
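 * Only the x coordinate is usable here: the supplied y satisfies b = 8 rather
 * than secp256k1's b = 7, so a compressed parse (which recomputes y from x and
 * a sign bit) can succeed while the full 64-byte point must be rejected.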
*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03 } }; #define SECP256K1_EC_PARSE_TEST_NINVALID (7) const unsigned char invalid[SECP256K1_EC_PARSE_TEST_NINVALID][64] = { { /* x is third root of -8, y is -1 * (x^3+7); also on the curve for y^2 = x^3 + 9. */ 0x0a, 0x2d, 0x2b, 0xa9, 0x35, 0x07, 0xf1, 0xdf, 0x23, 0x37, 0x70, 0xc2, 0xa7, 0x97, 0x96, 0x2c, 0xc6, 0x1f, 0x6d, 0x15, 0xda, 0x14, 0xec, 0xd4, 0x7d, 0x8d, 0x27, 0xae, 0x1c, 0xd5, 0xf8, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }, { /* Valid if x overflow ignored (x = 1 mod p). */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14, 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee, }, { /* Valid if x overflow ignored (x = 1 mod p). */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb, 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41, }, { /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, 0xf4, 0x84, 0x14, 0x5c, 0xb0, 0x14, 0x9b, 0x82, 0x5d, 0xff, 0x41, 0x2f, 0xa0, 0x52, 0xa8, 0x3f, 0xcb, 0x72, 0xdb, 0x61, 0xd5, 0x6f, 0x37, 0x70, 0xce, 0x06, 0x6b, 0x73, 0x49, 0xa2, 0xaa, 0x28, }, { /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, 0x0b, 0x7b, 0xeb, 0xa3, 0x4f, 0xeb, 0x64, 0x7d, 0xa2, 0x00, 0xbe, 0xd0, 0x5f, 0xad, 0x57, 0xc0, 0x34, 0x8d, 0x24, 0x9e, 0x2a, 0x90, 0xc8, 0x8f, 0x31, 0xf9, 0x94, 0x8b, 0xb6, 0x5d, 0x52, 0x07, }, { /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x53, 0x7e, 0xef, 0xdf, 0xc1, 0x60, 0x6a, 0x07, 0x27, 0xcd, 0x69, 0xb4, 0xa7, 0x33, 0x3d, 0x38, 0xed, 0x44, 0xe3, 0x93, 0x2a, 0x71, 0x79, 0xee, 0xcb, 0x4b, 0x6f, 0xba, 0x93, 0x60, 0xdc, }, { /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. 
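 * (When x^3 + 7 is a quadratic non-residue, the ladder's output
 * y = (x^3 + 7)^((p+1)/4) squares to -(x^3 + 7), which is why these candidates
 * land on y^2 = x^3 - 7 instead and must be rejected.)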
*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xac, 0x81, 0x10, 0x20, 0x3e, 0x9f, 0x95, 0xf8, 0xd8, 0x32, 0x96, 0x4b, 0x58, 0xcc, 0xc2, 0xc7, 0x12, 0xbb, 0x1c, 0x6c, 0xd5, 0x8e, 0x86, 0x11, 0x34, 0xb4, 0x8f, 0x45, 0x6c, 0x9b, 0x53 } }; const unsigned char pubkeyc[66] = { /* Serialization of G. */ 0x04, 0x79, 0xBE, 0x66, 0x7E, 0xF9, 0xDC, 0xBB, 0xAC, 0x55, 0xA0, 0x62, 0x95, 0xCE, 0x87, 0x0B, 0x07, 0x02, 0x9B, 0xFC, 0xDB, 0x2D, 0xCE, 0x28, 0xD9, 0x59, 0xF2, 0x81, 0x5B, 0x16, 0xF8, 0x17, 0x98, 0x48, 0x3A, 0xDA, 0x77, 0x26, 0xA3, 0xC4, 0x65, 0x5D, 0xA4, 0xFB, 0xFC, 0x0E, 0x11, 0x08, 0xA8, 0xFD, 0x17, 0xB4, 0x48, 0xA6, 0x85, 0x54, 0x19, 0x9C, 0x47, 0xD0, 0x8F, 0xFB, 0x10, 0xD4, 0xB8, 0x00 }; unsigned char sout[65]; unsigned char shortkey[2]; secp256k1_ge ge; secp256k1_pubkey pubkey; size_t len; int32_t i; int32_t ecount; int32_t ecount2; ecount = 0; /* Nothing should be reading this far into pubkeyc. */ VG_UNDEF(&pubkeyc[65], 1); secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); /* Zero length claimed, fail, zeroize, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(shortkey, 2); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Length one claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 256 ; i++) { memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; shortkey[0] = i; VG_UNDEF(&shortkey[1], 1); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } /* Length two claimed, fail, zeroize, no illegal arg error. */ for (i = 0; i < 65536 ; i++) { memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; shortkey[0] = i & 255; shortkey[1] = i >> 8; VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); } memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); CHECK(ecount == 2); /* NULL input string. Illegal arg and zeroize output. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 1); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 2); /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. 
*/ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */ memset(&pubkey, 0xfe, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); CHECK(ecount == 1); /* Valid parse. */ memset(&pubkey, 0, sizeof(pubkey)); ecount = 0; VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); CHECK(secp256k1_ec_pubkey_parse(secp256k1_context_no_precomp, &pubkey, pubkeyc, 65) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(ecount == 0); VG_UNDEF(&ge, sizeof(ge)); CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1); VG_CHECK(&ge.x, sizeof(ge.x)); VG_CHECK(&ge.y, sizeof(ge.y)); VG_CHECK(&ge.infinity, sizeof(ge.infinity)); ge_equals_ge(&secp256k1_ge_const_g, &ge); CHECK(ecount == 0); /* secp256k1_ec_pubkey_serialize illegal args. */ ecount = 0; len = 65; CHECK(secp256k1_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 1); CHECK(len == 0); CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); CHECK(ecount == 2); len = 65; VG_UNDEF(sout, 65); CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); VG_CHECK(sout, 65); CHECK(ecount == 3); CHECK(len == 0); len = 65; CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); CHECK(ecount == 4); CHECK(len == 0); len = 65; VG_UNDEF(sout, 65); CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); VG_CHECK(sout, 65); CHECK(ecount == 4); CHECK(len == 65); /* Multiple illegal args. Should still set arg error only once. */ ecount = 0; ecount2 = 11; CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); /* Does the illegal arg callback actually change the behavior? */ secp256k1_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); CHECK(ecount == 1); CHECK(ecount2 == 10); secp256k1_context_set_illegal_callback(ctx, NULL, NULL); /* Try a bunch of prefabbed points with all possible encodings. */ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) { ec_pubkey_parse_pointtest(valid[i], 1, 1); } for (i = 0; i < SECP256K1_EC_PARSE_TEST_NXVALID; i++) { ec_pubkey_parse_pointtest(onlyxvalid[i], 1, 0); } for (i = 0; i < SECP256K1_EC_PARSE_TEST_NINVALID; i++) { ec_pubkey_parse_pointtest(invalid[i], 0, 0); } } void run_eckey_edge_case_test(void) { const unsigned char orderc[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; const unsigned char zeros[sizeof(secp256k1_pubkey)] = {0x00}; unsigned char ctmp[33]; unsigned char ctmp2[33]; secp256k1_pubkey pubkey; secp256k1_pubkey pubkey2; secp256k1_pubkey pubkey_one; secp256k1_pubkey pubkey_negone; const secp256k1_pubkey *pubkeys[3]; size_t len; int32_t ecount; /* Group order is too large, reject. 
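 * Valid secret keys are 32-byte big-endian encodings of integers in [1, n-1],
 * with n the group order spelled out in orderc above; the cases below check
 * that n, n+1, 2^256-1 and 0 are all rejected while 1 and n-1 are accepted.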
*/ CHECK(secp256k1_ec_seckey_verify(ctx, orderc) == 0); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* Maximum value is too large, reject. */ memset(ctmp, 255, 32); CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* Zero is too small, reject. */ memset(ctmp, 0, 32); CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* One must be accepted. */ ctmp[31] = 0x01; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); pubkey_one = pubkey; /* Group order + 1 is too large, reject. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x42; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); memset(&pubkey, 1, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* -1 must be accepted. */ ctmp[31] = 0x40; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); memset(&pubkey, 0, sizeof(pubkey)); VG_UNDEF(&pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); VG_CHECK(&pubkey, sizeof(pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); pubkey_negone = pubkey; /* Tweak of zero leaves the value unchanged. */ memset(ctmp2, 0, 32); CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 1); CHECK(secp256k1_memcmp_var(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); memcpy(&pubkey2, &pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Multiply tweak of zero zeroizes the output. */ CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing seckey, the seckey is zeroized. */ memcpy(ctmp, orderc, 32); memset(ctmp2, 0, 32); ctmp2[31] = 0x01; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp2) == 1); CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, ctmp2) == 0); CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, ctmp2) == 0); CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); /* If seckey_tweak_add or seckey_tweak_mul are called with an overflowing tweak, the seckey is zeroized. 
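 * The contract being exercised: seckey_tweak_add computes (seckey + tweak) mod n
 * and fails, zeroizing seckey, if the tweak is not a valid scalar or the result
 * is zero. A minimal caller-side sketch, with hypothetical key/tweak buffers:
 *
 *   unsigned char key[32], tweak[32];
 *   // ... obtain a valid key and a tweak from somewhere ...
 *   if (!secp256k1_ec_seckey_tweak_add(ctx, key, tweak)) {
 *       // failure: key has been zeroized and must not be used
 *   }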
*/ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, orderc) == 0); CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, orderc) == 0); CHECK(secp256k1_memcmp_var(zeros, ctmp, 32) == 0); memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; /* If pubkey_tweak_add or pubkey_tweak_mul are called with an overflowing tweak, the pubkey is zeroized. */ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* If the resulting key in secp256k1_ec_seckey_tweak_add and * secp256k1_ec_pubkey_tweak_add is 0 the functions fail and in the latter * case the pubkey is zeroized. */ memcpy(ctmp, orderc, 32); ctmp[31] = 0x40; memset(ctmp2, 0, 32); ctmp2[31] = 1; CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 0); CHECK(secp256k1_memcmp_var(zeros, ctmp2, 32) == 0); ctmp2[31] = 1; CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); /* Tweak computation wraps and results in a key of 1. */ ctmp2[31] = 2; CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp2, ctmp) == 1); CHECK(secp256k1_memcmp_var(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); ctmp2[31] = 2; CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 1; CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Tweak mul * 2 = 1+1. */ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); ctmp2[31] = 2; CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); /* Test argument errors. */ ecount = 0; secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); CHECK(ecount == 0); /* Zeroize pubkey on parse error. */ memset(&pubkey, 0, 32); CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); CHECK(ecount == 1); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(pubkey)) == 0); memcpy(&pubkey, &pubkey2, sizeof(pubkey)); memset(&pubkey2, 0, 32); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); CHECK(ecount == 2); CHECK(secp256k1_memcmp_var(&pubkey2, zeros, sizeof(pubkey2)) == 0); /* Plain argument errors. 
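 * Each NULL in/out pointer below must make the call return 0 and fire the
 * illegal-argument callback exactly once (tracked via ecount).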
*/ ecount = 0; CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); CHECK(ecount == 0); CHECK(secp256k1_ec_seckey_verify(ctx, NULL) == 0); CHECK(ecount == 1); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; CHECK(secp256k1_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 4; CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); CHECK(secp256k1_ec_seckey_tweak_add(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); CHECK(secp256k1_ec_seckey_tweak_add(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; memset(ctmp2, 0, 32); ctmp2[31] = 1; CHECK(secp256k1_ec_seckey_tweak_mul(ctx, NULL, ctmp2) == 0); CHECK(ecount == 1); CHECK(secp256k1_ec_seckey_tweak_mul(ctx, ctmp, NULL) == 0); CHECK(ecount == 2); ecount = 0; CHECK(secp256k1_ec_pubkey_create(ctx, NULL, ctmp) == 0); CHECK(ecount == 1); memset(&pubkey, 1, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 2); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); /* secp256k1_ec_pubkey_combine tests. */ ecount = 0; pubkeys[0] = &pubkey_one; VG_UNDEF(&pubkeys[0], sizeof(secp256k1_pubkey *)); VG_UNDEF(&pubkeys[1], sizeof(secp256k1_pubkey *)); VG_UNDEF(&pubkeys[2], sizeof(secp256k1_pubkey *)); memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 1); CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 2); memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 3); pubkeys[0] = &pubkey_negone; memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); CHECK(ecount == 3); len = 33; CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0); /* Result is infinity. */ pubkeys[0] = &pubkey_one; pubkeys[1] = &pubkey_negone; memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); CHECK(ecount == 3); /* Passes through infinity but comes out one. 
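 * The list becomes {one, -one, one}: the running sum reaches infinity after two
 * terms, yet the combined key is 1*G again and must serialize identically to
 * pubkey_one.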
*/ pubkeys[2] = &pubkey_one; memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); CHECK(ecount == 3); len = 33; CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); CHECK(secp256k1_memcmp_var(ctmp, ctmp2, 33) == 0); /* Adds to two. */ pubkeys[1] = &pubkey_one; memset(&pubkey, 255, sizeof(secp256k1_pubkey)); VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); CHECK(secp256k1_memcmp_var(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); CHECK(ecount == 3); secp256k1_context_set_illegal_callback(ctx, NULL, NULL); } void run_eckey_negate_test(void) { unsigned char seckey[32]; unsigned char seckey_tmp[32]; random_scalar_order_b32(seckey); memcpy(seckey_tmp, seckey, 32); /* Verify negation changes the key and changes it back */ CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1); CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) != 0); CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1); CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Check that privkey alias gives same result */ CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 1); CHECK(secp256k1_ec_privkey_negate(ctx, seckey_tmp) == 1); CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating all 0s fails */ memset(seckey, 0, 32); memset(seckey_tmp, 0, 32); CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0); /* Check that seckey is not modified */ CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); /* Negating an overflowing seckey fails and the seckey is zeroed. In this * test, the seckey has 16 random bytes to ensure that ec_seckey_negate * doesn't just set seckey to a constant value in case of failure. */ random_scalar_order_b32(seckey); memset(seckey, 0xFF, 16); memset(seckey_tmp, 0, 32); CHECK(secp256k1_ec_seckey_negate(ctx, seckey) == 0); CHECK(secp256k1_memcmp_var(seckey, seckey_tmp, 32) == 0); } void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) { secp256k1_scalar nonce; do { random_scalar_order_test(&nonce); } while(!secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); } void test_ecdsa_sign_verify(void) { secp256k1_gej pubj; secp256k1_ge pub; secp256k1_scalar one; secp256k1_scalar msg, key; secp256k1_scalar sigr, sigs; int recid; int getrec; random_scalar_order_test(&msg); random_scalar_order_test(&key); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); secp256k1_ge_set_gej(&pub, &pubj); getrec = secp256k1_testrand_bits(1); random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL); if (getrec) { CHECK(recid >= 0 && recid < 4); } CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); secp256k1_scalar_set_int(&one, 1); secp256k1_scalar_add(&msg, &msg, &one); CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); } void run_ecdsa_sign_verify(void) { int i; for (i = 0; i < 10*count; i++) { test_ecdsa_sign_verify(); } } /** Dummy nonce generation function that just uses a precomputed nonce, and fails if it is not accepted. Use only for testing. 
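 *
 * A minimal usage sketch (hypothetical msg32/seckey buffers, not part of the
 * suite), passing the function together with the 32-byte nonce as ndata:
 *
 *   unsigned char nonce[32];
 *   secp256k1_ecdsa_signature sig;
 *   // ... fill nonce, msg32 and seckey ...
 *   int ok = secp256k1_ecdsa_sign(ctx, &sig, msg32, seckey,
 *                                 precomputed_nonce_function, nonce);
 *   // ok == 0 if the fixed nonce is unusable (signing would need a retry)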
*/ static int precomputed_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { (void)msg32; (void)key32; (void)algo16; memcpy(nonce32, data, 32); return (counter == 0); } static int nonce_function_test_fail(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { /* Dummy nonce generator that has a fatal error on the first counter value. */ if (counter == 0) { return 0; } return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 1); } static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { /* Dummy nonce generator that produces unacceptable nonces for the first several counter values. */ if (counter < 3) { memset(nonce32, counter==0 ? 0 : 255, 32); if (counter == 2) { nonce32[31]--; } return 1; } if (counter < 5) { static const unsigned char order[] = { 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 }; memcpy(nonce32, order, 32); if (counter == 4) { nonce32[31]++; } return 1; } /* Retry rate of 6979 is negligible, especially as we only call this in deterministic tests. */ /* If someone does find a case where it retries for secp256k1, we'd like to know. */ if (counter > 5) { return 0; } return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5); } int is_empty_signature(const secp256k1_ecdsa_signature *sig) { static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0}; return secp256k1_memcmp_var(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0; } void test_ecdsa_end_to_end(void) { unsigned char extra[32] = {0x00}; unsigned char privkey[32]; unsigned char message[32]; unsigned char privkey2[32]; secp256k1_ecdsa_signature signature[6]; secp256k1_scalar r, s; unsigned char sig[74]; size_t siglen = 74; unsigned char pubkeyc[65]; size_t pubkeyclen = 65; secp256k1_pubkey pubkey; secp256k1_pubkey pubkey_tmp; unsigned char seckey[300]; size_t seckeylen = 300; /* Generate a random key and message. */ { secp256k1_scalar msg, key; random_scalar_order_test(&msg); random_scalar_order_test(&key); secp256k1_scalar_get_b32(privkey, &key); secp256k1_scalar_get_b32(message, &msg); } /* Construct and verify corresponding public key. */ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); /* Verify exporting and importing public key. */ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); memset(&pubkey, 0, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); /* Verify negation changes the key and changes it back */ memcpy(&pubkey_tmp, &pubkey, sizeof(pubkey)); CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) != 0); CHECK(secp256k1_ec_pubkey_negate(ctx, &pubkey_tmp) == 1); CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0); /* Verify private key import and export.
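 * (ec_privkey_export_der / ec_privkey_import_der round-trip the key through a
 * DER encoding; the compressed flag is chosen at random below.)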
*/ CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_testrand_bits(1) == 1)); CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); CHECK(secp256k1_memcmp_var(privkey, privkey2, 32) == 0); /* Optionally tweak the keys using addition. */ if (secp256k1_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; secp256k1_pubkey pubkey2; secp256k1_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); ret1 = secp256k1_ec_seckey_tweak_add(ctx, privkey, rnd); ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd); /* Check that privkey alias gives same result */ ret3 = secp256k1_ec_privkey_tweak_add(ctx, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Optionally tweak the keys using multiplication. */ if (secp256k1_testrand_int(3) == 0) { int ret1; int ret2; int ret3; unsigned char rnd[32]; unsigned char privkey_tmp[32]; secp256k1_pubkey pubkey2; secp256k1_testrand256_test(rnd); memcpy(privkey_tmp, privkey, 32); ret1 = secp256k1_ec_seckey_tweak_mul(ctx, privkey, rnd); ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); /* Check that privkey alias gives same result */ ret3 = secp256k1_ec_privkey_tweak_mul(ctx, privkey_tmp, rnd); CHECK(ret1 == ret2); CHECK(ret2 == ret3); if (ret1 == 0) { return; } CHECK(secp256k1_memcmp_var(privkey, privkey_tmp, 32) == 0); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); CHECK(secp256k1_memcmp_var(&pubkey, &pubkey2, sizeof(pubkey)) == 0); } /* Sign. */ CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); CHECK(secp256k1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); CHECK(secp256k1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); extra[31] = 1; CHECK(secp256k1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); extra[31] = 0; extra[0] = 1; CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); CHECK(secp256k1_memcmp_var(&signature[0], &signature[4], sizeof(signature[0])) == 0); CHECK(secp256k1_memcmp_var(&signature[0], &signature[1], sizeof(signature[0])) != 0); CHECK(secp256k1_memcmp_var(&signature[0], &signature[2], sizeof(signature[0])) != 0); CHECK(secp256k1_memcmp_var(&signature[0], &signature[3], sizeof(signature[0])) != 0); CHECK(secp256k1_memcmp_var(&signature[1], &signature[2], sizeof(signature[0])) != 0); CHECK(secp256k1_memcmp_var(&signature[1], &signature[3], sizeof(signature[0])) != 0); CHECK(secp256k1_memcmp_var(&signature[2], &signature[3], sizeof(signature[0])) != 0); /* Verify. 
*/ CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); CHECK(secp256k1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); CHECK(secp256k1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); /* Test lower-S form, malleate, verify and fail, test again, malleate again */ CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[0])); secp256k1_ecdsa_signature_load(ctx, &r, &s, &signature[0]); secp256k1_scalar_negate(&s, &s); secp256k1_ecdsa_signature_save(&signature[5], &r, &s); CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); CHECK(secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); CHECK(!secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); secp256k1_scalar_negate(&s, &s); secp256k1_ecdsa_signature_save(&signature[5], &r, &s); CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); CHECK(secp256k1_memcmp_var(&signature[5], &signature[0], 64) == 0); /* Serialize/parse DER and verify again */ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); memset(&signature[0], 0, sizeof(signature[0])); CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); /* Serialize/destroy/parse DER and verify again. */ siglen = 74; CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); sig[secp256k1_testrand_int(siglen)] += 1 + secp256k1_testrand_int(255); CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); } void test_random_pubkeys(void) { secp256k1_ge elem; secp256k1_ge elem2; unsigned char in[65]; /* Generate some randomly sized pubkeys. */ size_t len = secp256k1_testrand_bits(2) == 0 ? 65 : 33; if (secp256k1_testrand_bits(2) == 0) { len = secp256k1_testrand_bits(6); } if (len == 65) { in[0] = secp256k1_testrand_bits(1) ? 4 : (secp256k1_testrand_bits(1) ? 6 : 7); } else { in[0] = secp256k1_testrand_bits(1) ? 2 : 3; } if (secp256k1_testrand_bits(3) == 0) { in[0] = secp256k1_testrand_bits(8); } if (len > 1) { secp256k1_testrand256(&in[1]); } if (len > 33) { secp256k1_testrand256(&in[33]); } if (secp256k1_eckey_pubkey_parse(&elem, in, len)) { unsigned char out[65]; unsigned char firstb; int res; size_t size = len; firstb = in[0]; /* If the pubkey can be parsed, it should round-trip... */ CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33)); CHECK(size == len); CHECK(secp256k1_memcmp_var(&in[1], &out[1], len-1) == 0); /* ... except for the type of hybrid inputs. */ if ((in[0] != 6) && (in[0] != 7)) { CHECK(in[0] == out[0]); } size = 65; CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0)); CHECK(size == 65); CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size)); ge_equals_ge(&elem,&elem2); /* Check that the X9.62 hybrid type is checked. */ in[0] = secp256k1_testrand_bits(1) ? 
6 : 7; res = secp256k1_eckey_pubkey_parse(&elem2, in, size); if (firstb == 2 || firstb == 3) { if (in[0] == firstb + 4) { CHECK(res); } else { CHECK(!res); } } if (res) { ge_equals_ge(&elem,&elem2); CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0)); CHECK(secp256k1_memcmp_var(&in[1], &out[1], 64) == 0); } } } void run_random_pubkeys(void) { int i; for (i = 0; i < 10*count; i++) { test_random_pubkeys(); } } void run_ecdsa_end_to_end(void) { int i; for (i = 0; i < 64*count; i++) { test_ecdsa_end_to_end(); } } int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_der, int certainly_not_der) { static const unsigned char zeroes[32] = {0}; #ifdef ENABLE_OPENSSL_TESTS static const unsigned char max_scalar[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40 }; #endif int ret = 0; secp256k1_ecdsa_signature sig_der; unsigned char roundtrip_der[2048]; unsigned char compact_der[64]; size_t len_der = 2048; int parsed_der = 0, valid_der = 0, roundtrips_der = 0; secp256k1_ecdsa_signature sig_der_lax; unsigned char roundtrip_der_lax[2048]; unsigned char compact_der_lax[64]; size_t len_der_lax = 2048; int parsed_der_lax = 0, valid_der_lax = 0, roundtrips_der_lax = 0; #ifdef ENABLE_OPENSSL_TESTS ECDSA_SIG *sig_openssl; const BIGNUM *r = NULL, *s = NULL; const unsigned char *sigptr; unsigned char roundtrip_openssl[2048]; int len_openssl = 2048; int parsed_openssl, valid_openssl = 0, roundtrips_openssl = 0; #endif parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); if (parsed_der) { ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; valid_der = (secp256k1_memcmp_var(compact_der, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der + 32, zeroes, 32) != 0); } if (valid_der) { ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; roundtrips_der = (len_der == siglen) && secp256k1_memcmp_var(roundtrip_der, sig, siglen) == 0; } parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); if (parsed_der_lax) { ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; valid_der_lax = (secp256k1_memcmp_var(compact_der_lax, zeroes, 32) != 0) && (secp256k1_memcmp_var(compact_der_lax + 32, zeroes, 32) != 0); } if (valid_der_lax) { ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11; roundtrips_der_lax = (len_der_lax == siglen) && secp256k1_memcmp_var(roundtrip_der_lax, sig, siglen) == 0; } if (certainly_der) { ret |= (!parsed_der) << 2; } if (certainly_not_der) { ret |= (parsed_der) << 17; } if (valid_der) { ret |= (!roundtrips_der) << 3; } if (valid_der) { ret |= (!roundtrips_der_lax) << 12; ret |= (len_der != len_der_lax) << 13; ret |= ((len_der != len_der_lax) || (secp256k1_memcmp_var(roundtrip_der_lax, roundtrip_der, len_der) != 0)) << 14; } ret |= (roundtrips_der != roundtrips_der_lax) << 15; if (parsed_der) { ret |= (!parsed_der_lax) << 16; } #ifdef ENABLE_OPENSSL_TESTS sig_openssl = ECDSA_SIG_new(); sigptr = sig; parsed_openssl = (d2i_ECDSA_SIG(&sig_openssl, &sigptr, siglen) != NULL); if (parsed_openssl) { ECDSA_SIG_get0(sig_openssl, &r, &s); valid_openssl = !BN_is_negative(r) && !BN_is_negative(s) && BN_num_bits(r) > 0 && BN_num_bits(r) <= 256 && BN_num_bits(s) > 0 && BN_num_bits(s) <= 256; if (valid_openssl) { 
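/* Left-pad r (and s in the next block) to a 32-byte big-endian buffer and require it to compare below max_scalar, weeding out out-of-range components. */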
unsigned char tmp[32] = {0}; BN_bn2bin(r, tmp + 32 - BN_num_bytes(r)); valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0; } if (valid_openssl) { unsigned char tmp[32] = {0}; BN_bn2bin(s, tmp + 32 - BN_num_bytes(s)); valid_openssl = secp256k1_memcmp_var(tmp, max_scalar, 32) < 0; } } len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL); if (len_openssl <= 2048) { unsigned char *ptr = roundtrip_openssl; CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl); roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (secp256k1_memcmp_var(roundtrip_openssl, sig, siglen) == 0); } else { len_openssl = 0; } ECDSA_SIG_free(sig_openssl); ret |= (parsed_der && !parsed_openssl) << 4; ret |= (valid_der && !valid_openssl) << 5; ret |= (roundtrips_openssl && !parsed_der) << 6; ret |= (roundtrips_der != roundtrips_openssl) << 7; if (roundtrips_openssl) { ret |= (len_der != (size_t)len_openssl) << 8; ret |= ((len_der != (size_t)len_openssl) || (secp256k1_memcmp_var(roundtrip_der, roundtrip_openssl, len_der) != 0)) << 9; } #endif return ret; } static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { size_t i; for (i = 0; i < ptrlen; i++) { int shift = ptrlen - 1 - i; if (shift >= 4) { ptr[i] = 0; } else { ptr[i] = (val >> shift) & 0xFF; } } } static void damage_array(unsigned char *sig, size_t *len) { int pos; int action = secp256k1_testrand_bits(3); if (action < 1 && *len > 3) { /* Delete a byte. */ pos = secp256k1_testrand_int(*len); memmove(sig + pos, sig + pos + 1, *len - pos - 1); (*len)--; return; } else if (action < 2 && *len < 2048) { /* Insert a byte. */ pos = secp256k1_testrand_int(1 + *len); memmove(sig + pos + 1, sig + pos, *len - pos); sig[pos] = secp256k1_testrand_bits(8); (*len)++; return; } else if (action < 4) { /* Modify a byte. */ sig[secp256k1_testrand_int(*len)] += 1 + secp256k1_testrand_int(255); return; } else { /* action < 8 */ /* Modify a bit. */ sig[secp256k1_testrand_int(*len)] ^= 1 << secp256k1_testrand_bits(3); return; } } static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly_der, int* certainly_not_der) { int der; int nlow[2], nlen[2], nlenlen[2], nhbit[2], nhbyte[2], nzlen[2]; size_t tlen, elen, glen; int indet; int n; *len = 0; der = secp256k1_testrand_bits(2) == 0; *certainly_der = der; *certainly_not_der = 0; indet = der ? 0 : secp256k1_testrand_int(10) == 0; for (n = 0; n < 2; n++) { /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ nlow[n] = der ? 1 : (secp256k1_testrand_bits(3) != 0); /* The length of the number in bytes (the first byte of which will always be nonzero) */ nlen[n] = nlow[n] ? secp256k1_testrand_int(33) : 32 + secp256k1_testrand_int(200) * secp256k1_testrand_int(8) / 8; CHECK(nlen[n] <= 232); /* The top bit of the number. */ nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_testrand_bits(1)); /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_testrand_bits(7) : 1 + secp256k1_testrand_int(127)); /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_testrand_int(3) : secp256k1_testrand_int(300 - nlen[n]) * secp256k1_testrand_int(8) / 8); if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 
1 : 0)) { *certainly_not_der = 1; } CHECK(nlen[n] + nzlen[n] <= 300); /* The length of the length descriptor for the number. 0 means short encoding, anything else is long encoding. */ nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); if (!der) { /* nlenlen[n] max 127 bytes */ int add = secp256k1_testrand_int(127 - nlenlen[n]) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256; nlenlen[n] += add; if (add != 0) { *certainly_not_der = 1; } } CHECK(nlen[n] + nzlen[n] + nlenlen[n] <= 427); } /* The total length of the data to go, so far */ tlen = 2 + nlenlen[0] + nlen[0] + nzlen[0] + 2 + nlenlen[1] + nlen[1] + nzlen[1]; CHECK(tlen <= 856); /* The length of the garbage inside the tuple. */ elen = (der || indet) ? 0 : secp256k1_testrand_int(980 - tlen) * secp256k1_testrand_int(8) / 8; if (elen != 0) { *certainly_not_der = 1; } tlen += elen; CHECK(tlen <= 980); /* The length of the garbage after the end of the tuple. */ glen = der ? 0 : secp256k1_testrand_int(990 - tlen) * secp256k1_testrand_int(8) / 8; if (glen != 0) { *certainly_not_der = 1; } CHECK(tlen + glen <= 990); /* Write the tuple header. */ sig[(*len)++] = 0x30; if (indet) { /* Indeterminate length */ sig[(*len)++] = 0x80; *certainly_not_der = 1; } else { int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2); if (!der) { int add = secp256k1_testrand_int(127 - tlenlen) * secp256k1_testrand_int(16) * secp256k1_testrand_int(16) / 256; tlenlen += add; if (add != 0) { *certainly_not_der = 1; } } if (tlenlen == 0) { /* Short length notation */ sig[(*len)++] = tlen; } else { /* Long length notation */ sig[(*len)++] = 128 + tlenlen; assign_big_endian(sig + *len, tlenlen, tlen); *len += tlenlen; } tlen += tlenlen; } tlen += 2; CHECK(tlen + glen <= 1119); for (n = 0; n < 2; n++) { /* Write the integer header. */ sig[(*len)++] = 0x02; if (nlenlen[n] == 0) { /* Short length notation */ sig[(*len)++] = nlen[n] + nzlen[n]; } else { /* Long length notation. */ sig[(*len)++] = 128 + nlenlen[n]; assign_big_endian(sig + *len, nlenlen[n], nlen[n] + nzlen[n]); *len += nlenlen[n]; } /* Write zero padding */ while (nzlen[n] > 0) { sig[(*len)++] = 0x00; nzlen[n]--; } if (nlen[n] == 32 && !nlow[n]) { /* Special extra 16 0xFF bytes in "high" 32-byte numbers */ int i; for (i = 0; i < 16; i++) { sig[(*len)++] = 0xFF; } nlen[n] -= 16; } /* Write first byte of number */ if (nlen[n] > 0) { sig[(*len)++] = nhbyte[n]; nlen[n]--; } /* Generate remaining random bytes of number */ secp256k1_testrand_bytes_test(sig + *len, nlen[n]); *len += nlen[n]; nlen[n] = 0; } /* Generate random garbage inside tuple. */ secp256k1_testrand_bytes_test(sig + *len, elen); *len += elen; /* Generate end-of-contents bytes. */ if (indet) { sig[(*len)++] = 0; sig[(*len)++] = 0; tlen += 2; } CHECK(tlen + glen <= 1121); /* Generate random garbage outside tuple. 
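 * (Trailing bytes after the outer SEQUENCE are never valid DER, which is why a nonzero glen already set certainly_not_der above.)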
*/ secp256k1_testrand_bytes_test(sig + *len, glen); *len += glen; tlen += glen; CHECK(tlen <= 1121); CHECK(tlen == *len); } void run_ecdsa_der_parse(void) { int i,j; for (i = 0; i < 200 * count; i++) { unsigned char buffer[2048]; size_t buflen = 0; int certainly_der = 0; int certainly_not_der = 0; random_ber_signature(buffer, &buflen, &certainly_der, &certainly_not_der); CHECK(buflen <= 2048); for (j = 0; j < 16; j++) { int ret = 0; if (j > 0) { damage_array(buffer, &buflen); /* We don't know anything anymore about the DERness of the result */ certainly_der = 0; certainly_not_der = 0; } ret = test_ecdsa_der_parse(buffer, buflen, certainly_der, certainly_not_der); if (ret != 0) { size_t k; fprintf(stderr, "Failure %x on ", ret); for (k = 0; k < buflen; k++) { fprintf(stderr, "%02x ", buffer[k]); } fprintf(stderr, "\n"); } CHECK(ret == 0); } } } /* Tests several edge cases. */ void test_ecdsa_edge_cases(void) { int t; secp256k1_ecdsa_signature sig; /* Test the case where ECDSA recomputes a point that is infinity. */ { secp256k1_gej keyj; secp256k1_ge key; secp256k1_scalar msg; secp256k1_scalar sr, ss; secp256k1_scalar_set_int(&ss, 1); secp256k1_scalar_negate(&ss, &ss); secp256k1_scalar_inverse(&ss, &ss); secp256k1_scalar_set_int(&sr, 1); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); secp256k1_ge_set_gej(&key, &keyj); msg = ss; CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with r of zero fails. */ { const unsigned char pubkey_mods_zero[33] = { 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 }; secp256k1_ge key; secp256k1_scalar msg; secp256k1_scalar sr, ss; secp256k1_scalar_set_int(&ss, 1); secp256k1_scalar_set_int(&msg, 0); secp256k1_scalar_set_int(&sr, 0); CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with s of zero fails. */ { const unsigned char pubkey[33] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }; secp256k1_ge key; secp256k1_scalar msg; secp256k1_scalar sr, ss; secp256k1_scalar_set_int(&ss, 0); secp256k1_scalar_set_int(&msg, 0); secp256k1_scalar_set_int(&sr, 1); CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Verify signature with message 0 passes. 
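 * (With msg = 0 the verifier recomputes R = (msg/s)*G + (r/s)*Q = (r/s)*Q; here r = s, so R is the public key itself, and both keys below have x-coordinates congruent to r = 2 mod the group order.)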
*/ { const unsigned char pubkey[33] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }; const unsigned char pubkey2[33] = { 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x43 }; secp256k1_ge key; secp256k1_ge key2; secp256k1_scalar msg; secp256k1_scalar sr, ss; secp256k1_scalar_set_int(&ss, 2); secp256k1_scalar_set_int(&msg, 0); secp256k1_scalar_set_int(&sr, 2); CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33)); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); secp256k1_scalar_negate(&ss, &ss); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); secp256k1_scalar_set_int(&ss, 1); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); } /* Verify signature with message 1 passes. */ { const unsigned char pubkey[33] = { 0x02, 0x14, 0x4e, 0x5a, 0x58, 0xef, 0x5b, 0x22, 0x6f, 0xd2, 0xe2, 0x07, 0x6a, 0x77, 0xcf, 0x05, 0xb4, 0x1d, 0xe7, 0x4a, 0x30, 0x98, 0x27, 0x8c, 0x93, 0xe6, 0xe6, 0x3c, 0x0b, 0xc4, 0x73, 0x76, 0x25 }; const unsigned char pubkey2[33] = { 0x02, 0x8a, 0xd5, 0x37, 0xed, 0x73, 0xd9, 0x40, 0x1d, 0xa0, 0x33, 0xd2, 0xdc, 0xf0, 0xaf, 0xae, 0x34, 0xcf, 0x5f, 0x96, 0x4c, 0x73, 0x28, 0x0f, 0x92, 0xc0, 0xf6, 0x9d, 0xd9, 0xb2, 0x09, 0x10, 0x62 }; const unsigned char csr[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb }; secp256k1_ge key; secp256k1_ge key2; secp256k1_scalar msg; secp256k1_scalar sr, ss; secp256k1_scalar_set_int(&ss, 1); secp256k1_scalar_set_int(&msg, 1); secp256k1_scalar_set_b32(&sr, csr, NULL); CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33)); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); secp256k1_scalar_negate(&ss, &ss); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); secp256k1_scalar_set_int(&ss, 2); secp256k1_scalar_inverse_var(&ss, &ss); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); } /* Verify signature with message -1 passes. 
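 * (Analogous to the msg = 0 case above; note that negating s cannot change the outcome, since that only negates R and the verifier checks just the x-coordinate.)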
*/ { const unsigned char pubkey[33] = { 0x03, 0xaf, 0x97, 0xff, 0x7d, 0x3a, 0xf6, 0xa0, 0x02, 0x94, 0xbd, 0x9f, 0x4b, 0x2e, 0xd7, 0x52, 0x28, 0xdb, 0x49, 0x2a, 0x65, 0xcb, 0x1e, 0x27, 0x57, 0x9c, 0xba, 0x74, 0x20, 0xd5, 0x1d, 0x20, 0xf1 }; const unsigned char csr[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee }; secp256k1_ge key; secp256k1_scalar msg; secp256k1_scalar sr, ss; secp256k1_scalar_set_int(&ss, 1); secp256k1_scalar_set_int(&msg, 1); secp256k1_scalar_negate(&msg, &msg); secp256k1_scalar_set_b32(&sr, csr, NULL); CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); secp256k1_scalar_negate(&ss, &ss); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); secp256k1_scalar_set_int(&ss, 3); secp256k1_scalar_inverse_var(&ss, &ss); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); } /* Signature where s would be zero. */ { secp256k1_pubkey pubkey; size_t siglen; int32_t ecount; unsigned char signature[72]; static const unsigned char nonce[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }; static const unsigned char nonce2[32] = { 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x40 }; const unsigned char key[32] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, }; unsigned char msg[32] = { 0x86, 0x41, 0x99, 0x81, 0x06, 0x23, 0x44, 0x53, 0xaa, 0x5f, 0x9d, 0x6a, 0x31, 0x78, 0xf4, 0xf7, 0xb8, 0x12, 0xe0, 0x0b, 0x81, 0x7a, 0x77, 0x62, 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9, }; ecount = 0; secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); msg[31] = 0xaa; CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); CHECK(ecount == 0); CHECK(secp256k1_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 1); CHECK(secp256k1_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 2); CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); CHECK(ecount == 3); CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, key) == 1); CHECK(secp256k1_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); CHECK(ecount == 4); CHECK(secp256k1_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); CHECK(ecount == 5); CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, NULL) == 0); CHECK(ecount == 6); CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); CHECK(ecount == 6); CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); CHECK(ecount == 7); /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. 
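 * (The failed secp256k1_ec_pubkey_create call above zeroed the output pubkey, so the verify that follows sees an invalid opaque key, fails to load it, and bumps the illegal-argument counter.)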
*/ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); CHECK(ecount == 8); siglen = 72; CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); CHECK(ecount == 9); CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); CHECK(ecount == 10); CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); CHECK(ecount == 11); CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); CHECK(ecount == 11); CHECK(secp256k1_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); CHECK(ecount == 12); CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); CHECK(ecount == 13); CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); CHECK(ecount == 13); siglen = 10; /* Too little room for a signature does not fail via ARGCHECK. */ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); CHECK(ecount == 13); ecount = 0; CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); CHECK(ecount == 1); CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); CHECK(ecount == 2); CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); CHECK(ecount == 3); CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); CHECK(ecount == 3); CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); CHECK(ecount == 4); CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); CHECK(ecount == 5); CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); CHECK(ecount == 5); memset(signature, 255, 64); CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); CHECK(ecount == 5); secp256k1_context_set_illegal_callback(ctx, NULL, NULL); } /* Nonce function corner cases. */ for (t = 0; t < 2; t++) { static const unsigned char zero[32] = {0x00}; int i; unsigned char key[32]; unsigned char msg[32]; secp256k1_ecdsa_signature sig2; secp256k1_scalar sr[512], ss; const unsigned char *extra; extra = t == 0 ? NULL : zero; memset(msg, 0, 32); msg[31] = 1; /* High key results in signature failure. */ memset(key, 0xFF, 32); CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Zero key results in signature failure. */ memset(key, 0, 32); CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); CHECK(is_empty_signature(&sig)); /* Nonce function failure results in signature failure. */ key[31] = 1; CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); CHECK(is_empty_signature(&sig)); /* The retry loop successfully makes its way to the first good value. */ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); CHECK(!is_empty_signature(&sig)); CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); CHECK(!is_empty_signature(&sig2)); CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function is deterministic. */ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); CHECK(secp256k1_memcmp_var(&sig, &sig2, sizeof(sig)) == 0); /* The default nonce function changes output with different messages. 
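 * (The default is nonce_function_rfc6979, an HMAC-SHA256-based generator keyed on both the secret key and the message, so distinct messages should produce distinct r values in the loop below.)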
*/ for(i = 0; i < 256; i++) { int j; msg[0] = i; CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j])); } } msg[0] = 0; msg[31] = 2; /* The default nonce function changes output with different keys. */ for(i = 256; i < 512; i++) { int j; key[0] = i - 256; CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); CHECK(!is_empty_signature(&sig2)); secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); for (j = 0; j < i; j++) { CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j])); } } key[0] = 0; } { /* Check that optional nonce arguments do not have equivalent effect. */ const unsigned char zeros[32] = {0}; unsigned char nonce[32]; unsigned char nonce2[32]; unsigned char nonce3[32]; unsigned char nonce4[32]; VG_UNDEF(nonce,32); VG_UNDEF(nonce2,32); VG_UNDEF(nonce3,32); VG_UNDEF(nonce4,32); CHECK(nonce_function_rfc6979(nonce, zeros, zeros, NULL, NULL, 0) == 1); VG_CHECK(nonce,32); CHECK(nonce_function_rfc6979(nonce2, zeros, zeros, zeros, NULL, 0) == 1); VG_CHECK(nonce2,32); CHECK(nonce_function_rfc6979(nonce3, zeros, zeros, NULL, (void *)zeros, 0) == 1); VG_CHECK(nonce3,32); CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1); VG_CHECK(nonce4,32); CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0); CHECK(secp256k1_memcmp_var(nonce, nonce3, 32) != 0); CHECK(secp256k1_memcmp_var(nonce, nonce4, 32) != 0); CHECK(secp256k1_memcmp_var(nonce2, nonce3, 32) != 0); CHECK(secp256k1_memcmp_var(nonce2, nonce4, 32) != 0); CHECK(secp256k1_memcmp_var(nonce3, nonce4, 32) != 0); } /* Privkey export where pubkey is the point at infinity. */ { unsigned char privkey[300]; unsigned char seckey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41, }; size_t outlen = 300; CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 0)); outlen = 300; CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 1)); } } void run_ecdsa_edge_cases(void) { test_ecdsa_edge_cases(); } #ifdef ENABLE_OPENSSL_TESTS EC_KEY *get_openssl_key(const unsigned char *key32) { unsigned char privkey[300]; size_t privkeylen; const unsigned char* pbegin = privkey; int compr = secp256k1_testrand_bits(1); EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1); CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr)); CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen)); CHECK(EC_KEY_check_key(ec_key)); return ec_key; } void test_ecdsa_openssl(void) { secp256k1_gej qj; secp256k1_ge q; secp256k1_scalar sigr, sigs; secp256k1_scalar one; secp256k1_scalar msg2; secp256k1_scalar key, msg; EC_KEY *ec_key; unsigned int sigsize = 80; size_t secp_sigsize = 80; unsigned char message[32]; unsigned char signature[80]; unsigned char key32[32]; secp256k1_testrand256_test(message); secp256k1_scalar_set_b32(&msg, message, NULL); random_scalar_order_test(&key); secp256k1_scalar_get_b32(key32, &key); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key); secp256k1_ge_set_gej(&q, &qj); ec_key = get_openssl_key(key32); CHECK(ec_key != NULL); CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key)); CHECK(secp256k1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize)); CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg)); secp256k1_scalar_set_int(&one, 1); 
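/* A signature over the original message must not verify once the message is changed. */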
secp256k1_scalar_add(&msg2, &msg, &one); CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2)); random_sign(&sigr, &sigs, &key, &msg, NULL); CHECK(secp256k1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs)); CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1); EC_KEY_free(ec_key); } void run_ecdsa_openssl(void) { int i; for (i = 0; i < 10*count; i++) { test_ecdsa_openssl(); } } #endif #ifdef ENABLE_MODULE_ECDH # include "modules/ecdh/tests_impl.h" #endif #ifdef ENABLE_MODULE_MULTISET # include "modules/multiset/tests_impl.h" #endif #ifdef ENABLE_MODULE_RECOVERY # include "modules/recovery/tests_impl.h" #endif #ifdef ENABLE_MODULE_SCHNORR # include "modules/schnorr/tests_impl.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS # include "modules/extrakeys/tests_impl.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG # include "modules/schnorrsig/tests_impl.h" #endif void run_memczero_test(void) { unsigned char buf1[6] = {1, 2, 3, 4, 5, 6}; unsigned char buf2[sizeof(buf1)]; /* memczero(..., ..., 0) is a noop. */ memcpy(buf2, buf1, sizeof(buf1)); memczero(buf1, sizeof(buf1), 0); CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); /* memczero(..., ..., 1) zeros the buffer. */ memset(buf2, 0, sizeof(buf2)); memczero(buf1, sizeof(buf1) , 1); CHECK(secp256k1_memcmp_var(buf1, buf2, sizeof(buf1)) == 0); } void int_cmov_test(void) { int r = INT_MAX; int a = 0; secp256k1_int_cmov(&r, &a, 0); CHECK(r == INT_MAX); r = 0; a = INT_MAX; secp256k1_int_cmov(&r, &a, 1); CHECK(r == INT_MAX); a = 0; secp256k1_int_cmov(&r, &a, 1); CHECK(r == 0); a = 1; secp256k1_int_cmov(&r, &a, 1); CHECK(r == 1); r = 1; a = 0; secp256k1_int_cmov(&r, &a, 0); CHECK(r == 1); } void fe_cmov_test(void) { static const secp256k1_fe zero = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0); static const secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); static const secp256k1_fe max = SECP256K1_FE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); secp256k1_fe r = max; secp256k1_fe a = zero; secp256k1_fe_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_fe_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_fe_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_fe_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_fe_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void fe_storage_cmov_test(void) { static const secp256k1_fe_storage zero = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0); static const secp256k1_fe_storage one = SECP256K1_FE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1); static const secp256k1_fe_storage max = SECP256K1_FE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); secp256k1_fe_storage r = max; secp256k1_fe_storage a = zero; secp256k1_fe_storage_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_fe_storage_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_fe_storage_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_fe_storage_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_fe_storage_cmov(&r, &a, 0); 
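/* With flag == 0 the conditional move must leave r untouched. */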
CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void scalar_cmov_test(void) { static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); static const secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); static const secp256k1_scalar max = SECP256K1_SCALAR_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); secp256k1_scalar r = max; secp256k1_scalar a = zero; secp256k1_scalar_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_scalar_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_scalar_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_scalar_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_scalar_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void ge_storage_cmov_test(void) { static const secp256k1_ge_storage zero = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); static const secp256k1_ge_storage one = SECP256K1_GE_STORAGE_CONST(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1); static const secp256k1_ge_storage max = SECP256K1_GE_STORAGE_CONST( 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL ); secp256k1_ge_storage r = max; secp256k1_ge_storage a = zero; secp256k1_ge_storage_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); r = zero; a = max; secp256k1_ge_storage_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &max, sizeof(r)) == 0); a = zero; secp256k1_ge_storage_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &zero, sizeof(r)) == 0); a = one; secp256k1_ge_storage_cmov(&r, &a, 1); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); r = one; a = zero; secp256k1_ge_storage_cmov(&r, &a, 0); CHECK(secp256k1_memcmp_var(&r, &one, sizeof(r)) == 0); } void run_cmov_tests(void) { int_cmov_test(); fe_cmov_test(); fe_storage_cmov_test(); scalar_cmov_test(); ge_storage_cmov_test(); } int main(int argc, char **argv) { /* Disable buffering for stdout to improve reliability of getting * diagnostic information. Happens right at the start of main because * setbuf must be used before any other operation on the stream. */ setbuf(stdout, NULL); /* Also disable buffering for stderr because it's not guaranteed that it's * unbuffered on all systems. */ setbuf(stderr, NULL); /* find iteration count */ if (argc > 1) { count = strtol(argv[1], NULL, 0); } printf("test count = %i\n", count); /* find random seed */ secp256k1_testrand_init(argc > 2 ? argv[2] : NULL); /* initialize */ run_context_tests(0); run_context_tests(1); run_scratch_tests(); ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); if (secp256k1_testrand_bits(1)) { unsigned char rand32[32]; secp256k1_testrand256(rand32); CHECK(secp256k1_context_randomize(ctx, secp256k1_testrand_bits(1) ? 
rand32 : NULL)); } run_rand_bits(); run_rand_int(); run_sha256_tests(); run_hmac_sha256_tests(); run_rfc6979_hmac_sha256_tests(); #ifndef USE_NUM_NONE /* num tests */ run_num_smalltests(); #endif /* scalar tests */ run_scalar_tests(); /* field tests */ run_field_inv(); run_field_inv_var(); run_field_inv_all_var(); run_field_misc(); run_field_convert(); run_sqr(); run_sqrt(); /* group tests */ run_ge(); run_group_decompress(); /* ecmult tests */ run_wnaf(); run_point_times_order(); run_ecmult_near_split_bound(); run_ecmult_chain(); run_ecmult_constants(); run_ecmult_gen_blind(); run_ecmult_const_tests(); run_ecmult_multi_tests(); run_ec_combine(); /* endomorphism tests */ -#ifdef USE_ENDOMORPHISM run_endomorphism_tests(); -#endif /* EC point parser test */ run_ec_pubkey_parse_test(); /* EC key edge cases */ run_eckey_edge_case_test(); /* EC key arithmetic test */ run_eckey_negate_test(); #ifdef ENABLE_MODULE_ECDH /* ecdh tests */ run_ecdh_tests(); #endif /* ecdsa tests */ run_random_pubkeys(); run_ecdsa_der_parse(); run_ecdsa_sign_verify(); run_ecdsa_end_to_end(); run_ecdsa_edge_cases(); #ifdef ENABLE_OPENSSL_TESTS run_ecdsa_openssl(); #endif #ifdef ENABLE_MODULE_MULTISET run_multiset_tests(); #endif #ifdef ENABLE_MODULE_RECOVERY /* ECDSA pubkey recovery tests */ run_recovery_tests(); #endif #ifdef ENABLE_MODULE_SCHNORR /* Schnorr signature tests */ run_schnorr_tests(); #endif #ifdef ENABLE_MODULE_EXTRAKEYS run_extrakeys_tests(); #endif #ifdef ENABLE_MODULE_SCHNORRSIG run_schnorrsig_tests(); #endif /* util tests */ run_memczero_test(); run_cmov_tests(); secp256k1_testrand_finish(); /* shutdown */ secp256k1_context_destroy(ctx); printf("no problems found\n"); return 0; } diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c index 8b642a60b..f4d5b8e17 100644 --- a/src/secp256k1/src/tests_exhaustive.c +++ b/src/secp256k1/src/tests_exhaustive.c @@ -1,458 +1,454 @@ /*********************************************************************** * Copyright (c) 2016 Andrew Poelstra * * Distributed under the MIT software license, see the accompanying * * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ #if defined HAVE_CONFIG_H #include "libsecp256k1-config.h" #endif #include <stdio.h> #include <stdlib.h> #include <time.h> #undef USE_ECMULT_STATIC_PRECOMPUTATION #ifndef EXHAUSTIVE_TEST_ORDER /* see group_impl.h for allowable values */ #define EXHAUSTIVE_TEST_ORDER 13 #endif #include "include/secp256k1.h" #include "assumptions.h" #include "group.h" #include "secp256k1.c" #include "testrand_impl.h" static int count = 2; /** stolen from tests.c */ void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { CHECK(a->infinity == b->infinity); if (a->infinity) { return; } CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); } void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { secp256k1_fe z2s; secp256k1_fe u1, u2, s1, s2; CHECK(a->infinity == b->infinity); if (a->infinity) { return; } /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses.
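 * A Jacobian point (X, Y, Z) represents the affine point (X/Z^2, Y/Z^3), so equality can be tested by cross-multiplying instead of computing a field inverse.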
*/ secp256k1_fe_sqr(&z2s, &b->z); secp256k1_fe_mul(&u1, &a->x, &z2s); u2 = b->x; secp256k1_fe_normalize_weak(&u2); secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); s2 = b->y; secp256k1_fe_normalize_weak(&s2); CHECK(secp256k1_fe_equal_var(&u1, &u2)); CHECK(secp256k1_fe_equal_var(&s1, &s2)); } void random_fe(secp256k1_fe *x) { unsigned char bin[32]; do { secp256k1_testrand256(bin); if (secp256k1_fe_set_b32(x, bin)) { return; } } while(1); } /** END stolen from tests.c */ static uint32_t num_cores = 1; static uint32_t this_core = 0; SECP256K1_INLINE static int skip_section(uint64_t* iter) { if (num_cores == 1) return 0; *iter += 0xe7037ed1a0b428dbULL; return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core; } int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int attempt) { secp256k1_scalar s; int *idata = data; (void)msg32; (void)key32; (void)algo16; /* Some nonces cannot be used because they'd cause s and/or r to be zero. * The signing function has retry logic here that just re-calls the nonce * function with an increased `attempt`. So if attempt > 0 this means we * need to change the nonce to avoid an infinite loop. */ if (attempt > 0) { *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; } secp256k1_scalar_set_int(&s, *idata); secp256k1_scalar_get_b32(nonce32, &s); return 1; } -#ifdef USE_ENDOMORPHISM void test_exhaustive_endomorphism(const secp256k1_ge *group) { int i; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_ge res; secp256k1_ge_mul_lambda(&res, &group[i]); ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); } } -#endif void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) { int i, j; uint64_t iter = 0; /* Sanity-check (and check infinity functions) */ CHECK(secp256k1_ge_is_infinity(&group[0])); CHECK(secp256k1_gej_is_infinity(&groupj[0])); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { CHECK(!secp256k1_ge_is_infinity(&group[i])); CHECK(!secp256k1_gej_is_infinity(&groupj[i])); } /* Check all addition formulae */ for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { secp256k1_fe fe_inv; if (skip_section(&iter)) continue; secp256k1_fe_inv(&fe_inv, &groupj[j].z); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_ge zless_gej; secp256k1_gej tmp; /* add_var */ secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_ge */ if (j > 0) { secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* add_ge_var */ secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); /* add_zinv_var */ zless_gej.infinity = groupj[j].infinity; zless_gej.x = groupj[j].x; zless_gej.y = groupj[j].y; secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } /* Check doubling */ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_gej tmp; secp256k1_gej_double(&tmp, &groupj[i]); ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); secp256k1_gej_double_var(&tmp, &groupj[i], NULL); ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp); } /* Check negation */ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_ge tmp; secp256k1_gej tmpj; secp256k1_ge_neg(&tmp, &group[i]); 
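/* Since group[i] = i*G, its negation must equal group[order - i]. */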
ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp); secp256k1_gej_neg(&tmpj, &groupj[i]); ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj); } } void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj) { int i, j, r_log; uint64_t iter = 0; for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { if (skip_section(&iter)) continue; for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_gej tmp; secp256k1_scalar na, ng; secp256k1_scalar_set_int(&na, i); secp256k1_scalar_set_int(&ng, j); secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp); if (i > 0) { secp256k1_ecmult_const(&tmp, &group[i], &ng, 256); ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp); } } } } } typedef struct { secp256k1_scalar sc[2]; secp256k1_ge pt[2]; } ecmult_multi_data; static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) { ecmult_multi_data *data = (ecmult_multi_data*) cbdata; *sc = data->sc[idx]; *pt = data->pt[idx]; return 1; } void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group) { int i, j, k, x, y; uint64_t iter = 0; secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096); for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) { for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) { for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) { if (skip_section(&iter)) continue; for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) { secp256k1_gej tmp; secp256k1_scalar g_sc; ecmult_multi_data data; secp256k1_scalar_set_int(&data.sc[0], i); secp256k1_scalar_set_int(&data.sc[1], j); secp256k1_scalar_set_int(&g_sc, k); data.pt[0] = group[x]; data.pt[1] = group[y]; secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp); } } } } } secp256k1_scratch_destroy(&ctx->error_callback, scratch); } void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k, int* overflow) { secp256k1_fe x; unsigned char x_bin[32]; k %= EXHAUSTIVE_TEST_ORDER; x = group[k].x; secp256k1_fe_normalize(&x); secp256k1_fe_get_b32(x_bin, &x); secp256k1_scalar_set_b32(r, x_bin, overflow); } void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group) { int s, r, msg, key; uint64_t iter = 0; for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) { for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) { for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) { for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) { secp256k1_ge nonconst_ge; secp256k1_ecdsa_signature sig; secp256k1_pubkey pk; secp256k1_scalar sk_s, msg_s, r_s, s_s; secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; int k, should_verify; unsigned char msg32[32]; if (skip_section(&iter)) continue; secp256k1_scalar_set_int(&s_s, s); secp256k1_scalar_set_int(&r_s, r); secp256k1_scalar_set_int(&msg_s, msg); secp256k1_scalar_set_int(&sk_s, key); /* Verify by hand */ /* Run through every k value that gives us this r and check that *one* works. * Note there could be none, there could be multiple, ECDSA is weird. 
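 * Concretely: (r, s) verifies iff some nonce k satisfies both r == x(k*G) mod n and s*k == msg + r*key (mod n); because x-coordinates are reduced mod n, several group elements can share the same r.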
*/ should_verify = 0; for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) { secp256k1_scalar check_x_s; r_from_k(&check_x_s, group, k, NULL); if (r_s == check_x_s) { secp256k1_scalar_set_int(&s_times_k_s, k); secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); } } /* n.b. we have a "high s" rule */ should_verify &= !secp256k1_scalar_is_high(&s_s); /* Verify by calling verify */ secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s); memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); secp256k1_pubkey_save(&pk, &nonconst_ge); secp256k1_scalar_get_b32(msg32, &msg_s); CHECK(should_verify == secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); } } } } } void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group) { int i, j, k; uint64_t iter = 0; /* Loop */ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */ for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */ if (skip_section(&iter)) continue; for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; secp256k1_ecdsa_signature sig; secp256k1_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; secp256k1_scalar_set_int(&msg, i); secp256k1_scalar_set_int(&sk, j); secp256k1_scalar_get_b32(sk32, &sk); secp256k1_scalar_get_b32(msg32, &msg); secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k); secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important * because our nonce-computing function might change k during * signing. */ r_from_k(&expected_r, group, k, NULL); CHECK(r == expected_r); CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER || (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER); /* Overflow means we've tried every possible nonce */ if (k < starting_k) { break; } } } } /* We would like to verify zero-knowledge here by counting how often every * possible (s, r) tuple appears, but because the group order is larger * than the field order, when coercing the x-values to scalar values, some * appear more often than others, so we are actually not zero-knowledge. * (This effect also appears in the real code, but the difference is on the * order of 1/2^128th the field order, so the deviation is not useful to a * computationally bounded attacker.) */ } #ifdef ENABLE_MODULE_RECOVERY #include "src/modules/recovery/tests_exhaustive_impl.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS #include "src/modules/extrakeys/tests_exhaustive_impl.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG #include "src/modules/schnorrsig/tests_exhaustive_impl.h" #endif int main(int argc, char** argv) { int i; secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; secp256k1_ge group[EXHAUSTIVE_TEST_ORDER]; unsigned char rand32[32]; secp256k1_context *ctx; /* Disable buffering for stdout to improve reliability of getting * diagnostic information. Happens right at the start of main because * setbuf must be used before any other operation on the stream. */ setbuf(stdout, NULL); /* Also disable buffering for stderr because it's not guaranteed that it's * unbuffered on all systems.
*/ setbuf(stderr, NULL); printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER); /* find iteration count */ if (argc > 1) { count = strtol(argv[1], NULL, 0); } printf("test count = %i\n", count); /* find random seed */ secp256k1_testrand_init(argc > 2 ? argv[2] : NULL); /* set up split processing */ if (argc > 4) { num_cores = strtol(argv[3], NULL, 0); this_core = strtol(argv[4], NULL, 0); if (num_cores < 1 || this_core >= num_cores) { fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]); return 1; } printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1); } while (count--) { /* Build context */ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); secp256k1_testrand256(rand32); CHECK(secp256k1_context_randomize(ctx, rand32)); /* Generate the entire group */ secp256k1_gej_set_infinity(&groupj[0]); secp256k1_ge_set_gej(&group[0], &groupj[0]); for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g); secp256k1_ge_set_gej(&group[i], &groupj[i]); if (count != 0) { /* Set a different random z-value for each Jacobian point, except z=1 is used in the last iteration. */ secp256k1_fe z; random_fe(&z); secp256k1_gej_rescale(&groupj[i], &z); } /* Verify against ecmult_gen */ { secp256k1_scalar scalar_i; secp256k1_gej generatedj; secp256k1_ge generated; secp256k1_scalar_set_int(&scalar_i, i); secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); secp256k1_ge_set_gej(&generated, &generatedj); CHECK(group[i].infinity == 0); CHECK(generated.infinity == 0); CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x)); CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y)); } } /* Run the tests */ -#ifdef USE_ENDOMORPHISM test_exhaustive_endomorphism(group); -#endif test_exhaustive_addition(group, groupj); test_exhaustive_ecmult(ctx, group, groupj); test_exhaustive_ecmult_multi(ctx, group); test_exhaustive_sign(ctx, group); test_exhaustive_verify(ctx, group); #ifdef ENABLE_MODULE_RECOVERY test_exhaustive_recovery(ctx, group); #endif #ifdef ENABLE_MODULE_EXTRAKEYS test_exhaustive_extrakeys(ctx, group); #endif #ifdef ENABLE_MODULE_SCHNORRSIG test_exhaustive_schnorrsig(ctx); #endif secp256k1_context_destroy(ctx); } secp256k1_testrand_finish(); printf("no problems found\n"); return 0; } diff --git a/src/secp256k1/travis/build_autotools.sh b/src/secp256k1/travis/build_autotools.sh index 8c6aca02c..92366e731 100755 --- a/src/secp256k1/travis/build_autotools.sh +++ b/src/secp256k1/travis/build_autotools.sh @@ -1,103 +1,102 @@ #!/usr/bin/env bash export LC_ALL=C set -ex # FIXME The java tests will fail on macOS with autotools due to the # libsecp256k1_jni library referencing the libsecp256k1 library with an absolute # path. This needs to be rewritten with install_name_tool to use relative paths # via the @variables supported by the macOS loader.
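# An untested sketch of what that rewrite might look like (the paths and
# library names below are illustrative only):
#   install_name_tool -change /usr/local/lib/libsecp256k1.0.dylib \
#       @loader_path/libsecp256k1.0.dylib .libs/libsecp256k1_jni.dylib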
if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$JNI" = "yes" ] then echo "Skipping the java tests built with autotools on OSX" exit 0 fi if [ -n "$HOST" ]; then USE_HOST="--host=$HOST" fi if [ "x$HOST" = "xi686-linux-gnu" ]; then CC="$CC -m32" fi if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$TRAVIS_COMPILER" = "gcc" ] then CC="gcc-9" fi $CC --version ./autogen.sh mkdir buildautotools pushd buildautotools ../configure \ --enable-experimental=$EXPERIMENTAL \ - --enable-endomorphism=$ENDOMORPHISM \ --with-test-override-wide-multiply=$WIDEMUL \ --with-bignum=$BIGNUM \ --with-asm=$ASM \ --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION \ --with-ecmult-gen-precision=$ECMULTGENPRECISION \ --enable-module-ecdh=$ECDH \ --enable-module-multiset=$MULTISET \ --enable-module-recovery=$RECOVERY \ --enable-module-schnorr=$SCHNORR \ --enable-module-schnorrsig=$SCHNORRSIG \ --enable-jni=$JNI \ --enable-openssl-tests=$OPENSSL_TESTS \ --with-valgrind=$WITH_VALGRIND \ $AUTOTOOLS_EXTRA_FLAGS \ $USE_HOST print_logs() { cat tests.log || : cat exhaustive_tests.log || : cat valgrind_ctime_test.log || : cat bench.log || : } trap 'print_logs' ERR make -j2 $AUTOTOOLS_TARGET if [ "$RUN_VALGRIND" = "yes" ]; then # the `--error-exitcode` is required to make the test fail if valgrind found # errors, otherwise it'll return 0 # (http://valgrind.org/docs/manual/manual-core.html) valgrind --error-exitcode=42 ./tests 16 valgrind --error-exitcode=42 ./exhaustive_tests fi if [ "$BENCH" = "yes" ]; then if [ "$RUN_VALGRIND" = "yes" ]; then # Using the local `libtool` because on macOS the system's libtool has # nothing to do with GNU libtool EXEC='./libtool --mode=execute valgrind --error-exitcode=42'; else EXEC= ; fi $EXEC ./bench_ecmult >> bench.log 2>&1 $EXEC ./bench_internal >> bench.log 2>&1 $EXEC ./bench_sign >> bench.log 2>&1 $EXEC ./bench_verify >> bench.log 2>&1 if [ "$RECOVERY" == "yes" ]; then $EXEC ./bench_recover >> bench.log 2>&1 fi if [ "$ECDH" == "yes" ]; then $EXEC ./bench_ecdh >> bench.log 2>&1 fi if [ "$MULTISET" == "yes" ]; then $EXEC ./bench_multiset >> bench.log 2>&1 fi if [ "$SCHNORRSIG" == "yes" ]; then $EXEC ./bench_schnorrsig >> bench.log 2>&1 fi fi if [ "$CTIMETEST" = "yes" ]; then ./libtool --mode=execute valgrind --error-exitcode=42 ./valgrind_ctime_test > valgrind_ctime_test.log 2>&1 fi popd diff --git a/src/secp256k1/travis/build_cmake.sh b/src/secp256k1/travis/build_cmake.sh index 906ee9360..2e5d672b8 100755 --- a/src/secp256k1/travis/build_cmake.sh +++ b/src/secp256k1/travis/build_cmake.sh @@ -1,57 +1,56 @@ #!/usr/bin/env bash export LC_ALL=C set -ex if [ "x$HOST" = "xi686-linux-gnu" ]; then CMAKE_EXTRA_FLAGS="$CMAKE_EXTRA_FLAGS -DCMAKE_C_FLAGS=-m32" fi if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$TRAVIS_COMPILER" = "gcc" ] then CMAKE_EXTRA_FLAGS="$CMAKE_EXTRA_FLAGS -DCMAKE_C_COMPILER=gcc-9" fi # "auto" is not a valid value for SECP256K1_ECMULT_GEN_PRECISION with cmake. # In this case we use the default value instead by not setting the cache # variable on the cmake command line. 
if [ "x$ECMULTGENPRECISION" != "xauto" ]; then ECMULT_GEN_PRECISION_ARG="-DSECP256K1_ECMULT_GEN_PRECISION=$ECMULTGENPRECISION" fi mkdir -p buildcmake/install pushd buildcmake CMAKE_COMMAND=cmake # Override with the cmake version installed via the install_cmake.sh script on # amd64 if [ "${TRAVIS_CPU_ARCH}" = "amd64" ]; then if [ "$TRAVIS_OS_NAME" = "linux" ]; then CMAKE_COMMAND=/opt/cmake/bin/cmake elif [ "$TRAVIS_OS_NAME" = "osx" ]; then CMAKE_COMMAND=/opt/cmake/CMake.app/Contents/bin/cmake fi fi ${CMAKE_COMMAND} --version ${CMAKE_COMMAND} -GNinja .. \ -DCMAKE_INSTALL_PREFIX=install \ -DSECP256K1_BUILD_OPENSSL_TESTS=$OPENSSL_TESTS \ -DSECP256K1_ECMULT_STATIC_PRECOMPUTATION=$STATICPRECOMPUTATION \ -DSECP256K1_ENABLE_MODULE_ECDH=$ECDH \ -DSECP256K1_ENABLE_MODULE_MULTISET=$MULTISET \ -DSECP256K1_ENABLE_MODULE_RECOVERY=$RECOVERY \ -DSECP256K1_ENABLE_MODULE_SCHNORR=$SCHNORR \ -DSECP256K1_ENABLE_MODULE_EXTRAKEYS=$SCHNORRSIG \ -DSECP256K1_ENABLE_MODULE_SCHNORRSIG=$SCHNORRSIG \ -DSECP256K1_ENABLE_JNI=$JNI \ - -DSECP256K1_ENABLE_ENDOMORPHISM=$ENDOMORPHISM \ -DSECP256K1_ENABLE_BIGNUM=$BIGNUM \ -DSECP256K1_USE_ASM=$ASM \ -DSECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY=$WIDEMUL \ $ECMULT_GEN_PRECISION_ARG \ $CMAKE_EXTRA_FLAGS ninja $CMAKE_TARGET popd