diff --git a/src/secp256k1/src/ecmult_gen_impl.h b/src/secp256k1/src/ecmult_gen_impl.h
index f818d45c29..0e2eafa71d 100644
--- a/src/secp256k1/src/ecmult_gen_impl.h
+++ b/src/secp256k1/src/ecmult_gen_impl.h
@@ -1,211 +1,211 @@
/**********************************************************************
 * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell      *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H

#include "util.h"
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#include "hash_impl.h"
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif

#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec));
#else
static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
#endif

static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
    ctx->prec = NULL;
}

static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    secp256k1_ge prec[1024];
    secp256k1_gej gj;
    secp256k1_gej nums_gej;
    int i, j;
    size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
    void* const base = *prealloc;
#endif

    if (ctx->prec != NULL) {
        return;
    }
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    ctx->prec = (secp256k1_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size);

    /* get the generator */
    secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);

    /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
    {
        static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
        secp256k1_fe nums_x;
        secp256k1_ge nums_ge;
        int r;
        r = secp256k1_fe_set_b32(&nums_x, nums_b32);
        (void)r;
        VERIFY_CHECK(r);
        r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
        (void)r;
        VERIFY_CHECK(r);
        secp256k1_gej_set_ge(&nums_gej, &nums_ge);
        /* Add G to make the bits in x uniformly distributed. */
        secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
    }

    /* compute prec. */
    {
        secp256k1_gej precj[1024]; /* Jacobian versions of prec. */
        secp256k1_gej gbase;
        secp256k1_gej numsbase;
        gbase = gj; /* 16^j * G */
        numsbase = nums_gej; /* 2^j * nums. */
        for (j = 0; j < 64; j++) {
            /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
            precj[j*16] = numsbase;
            for (i = 1; i < 16; i++) {
                secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
            }
            /* Multiply gbase by 16. */
            for (i = 0; i < 4; i++) {
                secp256k1_gej_double_var(&gbase, &gbase, NULL);
            }
            /* Multiply numbase by 2. */
            secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
            if (j == 62) {
                /* In the last iteration, numsbase is (1 - 2^j) * nums instead.
                 */
                secp256k1_gej_neg(&numsbase, &numsbase);
                secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
            }
        }
        secp256k1_ge_set_all_gej_var(prec, precj, 1024);
    }
    for (j = 0; j < 64; j++) {
        for (i = 0; i < 16; i++) {
            secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
        }
    }
#else
    (void)prealloc;
    ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
#endif
    secp256k1_ecmult_gen_blind(ctx, NULL);
}

static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) {
    return ctx->prec != NULL;
}

static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
    if (src->prec != NULL) {
        /* We cast to void* first to suppress a -Wcast-align warning. */
        dst->prec = (secp256k1_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
    }
#else
    (void)dst, (void)src;
#endif
}

static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
    secp256k1_scalar_clear(&ctx->blind);
    secp256k1_gej_clear(&ctx->initial);
    ctx->prec = NULL;
}

static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) {
    secp256k1_ge add;
    secp256k1_ge_storage adds;
    secp256k1_scalar gnb;
    int bits;
    int i, j;
    memset(&adds, 0, sizeof(adds));
    *r = ctx->initial;
    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
    secp256k1_scalar_add(&gnb, gn, &ctx->blind);
    add.infinity = 0;
    for (j = 0; j < 64; j++) {
        bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
        for (i = 0; i < 16; i++) {
            /** This uses a conditional move to avoid any secret data in array indexes.
             * _Any_ use of secret indexes has been demonstrated to result in timing
             * sidechannels, even when the cache-line access patterns are uniform.
             * See also:
             * "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
             * (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
             * "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
             * by Dag Arne Osvik, Adi Shamir, and Eran Tromer
             * (http://www.tau.ac.il/~tromer/papers/cache.pdf)
             */
            secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
        }
        secp256k1_ge_from_storage(&add, &adds);
        secp256k1_gej_add_ge(r, r, &add);
    }
    bits = 0;
    secp256k1_ge_clear(&add);
    secp256k1_scalar_clear(&gnb);
}

/* Setup blinding values for secp256k1_ecmult_gen. */
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
    secp256k1_scalar b;
    secp256k1_gej gb;
    secp256k1_fe s;
    unsigned char nonce32[32];
    secp256k1_rfc6979_hmac_sha256 rng;
    int retry;
    unsigned char keydata[64] = {0};
    if (seed32 == NULL) {
        /* When seed is NULL, reset the initial point and blinding value. */
        secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
        secp256k1_gej_neg(&ctx->initial, &ctx->initial);
        secp256k1_scalar_set_int(&ctx->blind, 1);
    }
    /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
    secp256k1_scalar_get_b32(nonce32, &ctx->blind);
    /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
     * and guards against weak or adversarial seeds. This is a simpler and safer interface than
     * asking the caller for blinding values directly and expecting them to retry on failure.
     */
    memcpy(keydata, nonce32, 32);
    if (seed32 != NULL) {
        memcpy(keydata + 32, seed32, 32);
    }
    secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
    memset(keydata, 0, sizeof(keydata));
    /* Retry for out of range results to achieve uniformity. */
    do {
        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
        retry = !secp256k1_fe_set_b32(&s, nonce32);
-        retry |= secp256k1_fe_is_zero(&s);
+        retry = retry || secp256k1_fe_is_zero(&s);
    } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > Fp. */
    /* Randomize the projection to defend against multiplier sidechannels. */
    secp256k1_gej_rescale(&ctx->initial, &s);
    secp256k1_fe_clear(&s);
    do {
        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
        secp256k1_scalar_set_b32(&b, nonce32, &retry);
        /* A blinding value of 0 works, but would undermine the projection hardening. */
-        retry |= secp256k1_scalar_is_zero(&b);
+        retry = retry || secp256k1_scalar_is_zero(&b);
    } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > order. */
    secp256k1_rfc6979_hmac_sha256_finalize(&rng);
    memset(nonce32, 0, 32);
    secp256k1_ecmult_gen(ctx, &gb, &b);
    secp256k1_scalar_negate(&b, &b);
    ctx->blind = b;
    ctx->initial = gb;
    secp256k1_scalar_clear(&b);
    secp256k1_gej_clear(&gb);
}

#endif /* SECP256K1_ECMULT_GEN_IMPL_H */
diff --git a/src/secp256k1/src/modules/recovery/main_impl.h b/src/secp256k1/src/modules/recovery/main_impl.h
index c5cc353ad3..93d80b1013 100644
--- a/src/secp256k1/src/modules/recovery/main_impl.h
+++ b/src/secp256k1/src/modules/recovery/main_impl.h
@@ -1,194 +1,194 @@
/**********************************************************************
 * Copyright (c) 2013-2015 Pieter Wuille                               *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_MODULE_RECOVERY_MAIN_H
#define SECP256K1_MODULE_RECOVERY_MAIN_H

#include "include/secp256k1_recovery.h"

static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) {
    (void)ctx;
    if (sizeof(secp256k1_scalar) == 32) {
        /* When the secp256k1_scalar type is exactly 32 byte, use its
         * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
         * Note that secp256k1_ecdsa_signature_save must use the same representation.
         */
        memcpy(r, &sig->data[0], 32);
        memcpy(s, &sig->data[32], 32);
    } else {
        secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
        secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
    }
    *recid = sig->data[64];
}

static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) {
    if (sizeof(secp256k1_scalar) == 32) {
        memcpy(&sig->data[0], r, 32);
        memcpy(&sig->data[32], s, 32);
    } else {
        secp256k1_scalar_get_b32(&sig->data[0], r);
        secp256k1_scalar_get_b32(&sig->data[32], s);
    }
    sig->data[64] = recid;
}

int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
    secp256k1_scalar r, s;
    int ret = 1;
    int overflow = 0;

    (void)ctx;
    ARG_CHECK(sig != NULL);
    ARG_CHECK(input64 != NULL);
    ARG_CHECK(recid >= 0 && recid <= 3);

    secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
    ret &= !overflow;
    secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
    ret &= !overflow;
    if (ret) {
        secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
    } else {
        memset(sig, 0, sizeof(*sig));
    }
    return ret;
}

int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) {
    secp256k1_scalar r, s;

    (void)ctx;
    ARG_CHECK(output64 != NULL);
    ARG_CHECK(sig != NULL);
    ARG_CHECK(recid != NULL);

    secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
    secp256k1_scalar_get_b32(&output64[0], &r);
    secp256k1_scalar_get_b32(&output64[32], &s);
    return 1;
}

int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) {
    secp256k1_scalar r, s;
    int recid;

    (void)ctx;
    ARG_CHECK(sig != NULL);
    ARG_CHECK(sigin != NULL);

    secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
    secp256k1_ecdsa_signature_save(sig, &r, &s);
    return 1;
}

static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) {
    unsigned char brx[32];
    secp256k1_fe fx;
    secp256k1_ge x;
    secp256k1_gej xj;
    secp256k1_scalar rn, u1, u2;
    secp256k1_gej qj;
    int r;

    if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
        return 0;
    }

    secp256k1_scalar_get_b32(brx, sigr);
    r = secp256k1_fe_set_b32(&fx, brx);
    (void)r;
    VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
    if (recid & 2) {
        if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
            return 0;
        }
        secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
    }
    if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
        return 0;
    }
    secp256k1_gej_set_ge(&xj, &x);
    secp256k1_scalar_inverse_var(&rn, sigr);
    secp256k1_scalar_mul(&u1, &rn, message);
    secp256k1_scalar_negate(&u1, &u1);
    secp256k1_scalar_mul(&u2, &rn, sigs);
    secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1);
    secp256k1_ge_set_gej_var(pubkey, &qj);
    return !secp256k1_gej_is_infinity(&qj);
}

int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
    secp256k1_scalar r, s;
    secp256k1_scalar sec, non, msg;
    int recid;
    int ret = 0;
    int overflow = 0;
    const unsigned char
        secp256k1_ecdsa_recoverable_algo16[17] = "ECDSA+Recovery ";
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(signature != NULL);
    ARG_CHECK(seckey != NULL);
    if (noncefp == NULL) {
        noncefp = secp256k1_nonce_function_default;
    }

    secp256k1_scalar_set_b32(&sec, seckey, &overflow);
    /* Fail if the secret key is invalid. */
    if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
        unsigned char nonce32[32];
        unsigned int count = 0;
        secp256k1_scalar_set_b32(&msg, msg32, NULL);
        while (1) {
            ret = noncefp(nonce32, msg32, seckey, secp256k1_ecdsa_recoverable_algo16, (void*)noncedata, count);
            if (!ret) {
                break;
            }
            secp256k1_scalar_set_b32(&non, nonce32, &overflow);
-            if (!secp256k1_scalar_is_zero(&non) && !overflow) {
+            if (!overflow && !secp256k1_scalar_is_zero(&non)) {
                if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
                    break;
                }
            }
            count++;
        }
        memset(nonce32, 0, 32);
        secp256k1_scalar_clear(&msg);
        secp256k1_scalar_clear(&non);
        secp256k1_scalar_clear(&sec);
    }
    if (ret) {
        secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
    } else {
        memset(signature, 0, sizeof(*signature));
    }
    return ret;
}

int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) {
    secp256k1_ge q;
    secp256k1_scalar r, s;
    secp256k1_scalar m;
    int recid;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(signature != NULL);
    ARG_CHECK(pubkey != NULL);

    secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
    VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */
    secp256k1_scalar_set_b32(&m, msg32, NULL);
    if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
        secp256k1_pubkey_save(pubkey, &q);
        return 1;
    } else {
        memset(pubkey, 0, sizeof(*pubkey));
        return 0;
    }
}

#endif /* SECP256K1_MODULE_RECOVERY_MAIN_H */
diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c
index 6022960bae..739862cddb 100644
--- a/src/secp256k1/src/secp256k1.c
+++ b/src/secp256k1/src/secp256k1.c
@@ -1,699 +1,699 @@
/**********************************************************************
 * Copyright (c) 2013-2015 Pieter Wuille                               *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#include "include/secp256k1.h"
#include "include/secp256k1_preallocated.h"

#include "util.h"
#include "num_impl.h"
#include "field_impl.h"
#include "scalar_impl.h"
#include "group_impl.h"
#include "ecmult_impl.h"
#include "ecmult_const_impl.h"
#include "ecmult_gen_impl.h"
#include "ecdsa_impl.h"
#include "eckey_impl.h"
#include "hash_impl.h"
#include "scratch_impl.h"

#define ARG_CHECK(cond) do { \
    if (EXPECT(!(cond), 0)) { \
        secp256k1_callback_call(&ctx->illegal_callback, #cond); \
        return 0; \
    } \
} while(0)

#define ARG_CHECK_NO_RETURN(cond) do { \
    if (EXPECT(!(cond), 0)) { \
        secp256k1_callback_call(&ctx->illegal_callback, #cond); \
    } \
} while(0)

#ifndef USE_EXTERNAL_DEFAULT_CALLBACKS
#include <stdlib.h>
#include <stdio.h>
static void secp256k1_default_illegal_callback_fn(const char* str, void* data) {
    (void)data;
    fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
    abort();
}
static void secp256k1_default_error_callback_fn(const char* str, void* data) {
    (void)data;
    fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
    abort();
}
#else
void secp256k1_default_illegal_callback_fn(const char* str, void* data);
void secp256k1_default_error_callback_fn(const char* str, void* data);
#endif

static const secp256k1_callback default_illegal_callback = {
    secp256k1_default_illegal_callback_fn,
    NULL
};

static const secp256k1_callback default_error_callback = {
    secp256k1_default_error_callback_fn,
    NULL
};

struct secp256k1_context_struct {
    secp256k1_ecmult_context ecmult_ctx;
    secp256k1_ecmult_gen_context ecmult_gen_ctx;
    secp256k1_callback illegal_callback;
    secp256k1_callback error_callback;
};

static const secp256k1_context secp256k1_context_no_precomp_ = {
    { 0 },
    { 0 },
    { secp256k1_default_illegal_callback_fn, 0 },
    { secp256k1_default_error_callback_fn, 0 }
};
const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_no_precomp_;

size_t secp256k1_context_preallocated_size(unsigned int flags) {
    size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));

    if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
        secp256k1_callback_call(&default_illegal_callback, "Invalid flags");
        return 0;
    }

    if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
        ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
    }
    if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
        ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
    }
    return ret;
}

size_t secp256k1_context_preallocated_clone_size(const secp256k1_context* ctx) {
    size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
    VERIFY_CHECK(ctx != NULL);
    if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
        ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
    }
    if (secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)) {
        ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
    }
    return ret;
}

secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigned int flags) {
    void* const base = prealloc;
    size_t prealloc_size;
    secp256k1_context* ret;

    VERIFY_CHECK(prealloc != NULL);
    prealloc_size = secp256k1_context_preallocated_size(flags);
    ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size);
    ret->illegal_callback = default_illegal_callback;
    ret->error_callback = default_error_callback;

    if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
        secp256k1_callback_call(&ret->illegal_callback, "Invalid flags");
        return NULL;
    }

    secp256k1_ecmult_context_init(&ret->ecmult_ctx);
    secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);

    if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
        secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc);
    }
    if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
        secp256k1_ecmult_context_build(&ret->ecmult_ctx, &prealloc);
    }

    return (secp256k1_context*) ret;
}

secp256k1_context* secp256k1_context_create(unsigned int flags) {
    size_t const prealloc_size = secp256k1_context_preallocated_size(flags);
    secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, prealloc_size);
    if (EXPECT(secp256k1_context_preallocated_create(ctx, flags) == NULL, 0)) {
        free(ctx);
        return NULL;
    }

    return ctx;
}

secp256k1_context* secp256k1_context_preallocated_clone(const secp256k1_context* ctx, void* prealloc) {
    size_t prealloc_size;
    secp256k1_context* ret;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(prealloc != NULL);

    prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
    ret = (secp256k1_context*)prealloc;
    memcpy(ret, ctx, prealloc_size);
    secp256k1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx);
    secp256k1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx);
    return ret;
}

secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
    secp256k1_context* ret;
    size_t prealloc_size;

    VERIFY_CHECK(ctx != NULL);
    prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
    ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size);
    ret = secp256k1_context_preallocated_clone(ctx, ret);

    return ret;
}

void secp256k1_context_preallocated_destroy(secp256k1_context* ctx) {
    ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
    if (ctx != NULL) {
        secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
        secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
    }
}

void secp256k1_context_destroy(secp256k1_context* ctx) {
    if (ctx != NULL) {
        secp256k1_context_preallocated_destroy(ctx);
        free(ctx);
    }
}

void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
    ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
    if (fun == NULL) {
        fun = secp256k1_default_illegal_callback_fn;
    }
    ctx->illegal_callback.fn = fun;
    ctx->illegal_callback.data = data;
}

void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
    ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp);
    if (fun == NULL) {
        fun = secp256k1_default_error_callback_fn;
    }
    ctx->error_callback.fn = fun;
    ctx->error_callback.data = data;
}

secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context* ctx, size_t max_size) {
    VERIFY_CHECK(ctx != NULL);
    return secp256k1_scratch_create(&ctx->error_callback, max_size);
}

void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scratch_space* scratch) {
    VERIFY_CHECK(ctx != NULL);
    secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}

static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {
    if (sizeof(secp256k1_ge_storage) == 64) {
        /* When the secp256k1_ge_storage type is exactly 64 byte, use its
         * representation inside secp256k1_pubkey, as conversion is very fast.
         * Note that secp256k1_pubkey_save must use the same representation. */
        secp256k1_ge_storage s;
        memcpy(&s, &pubkey->data[0], sizeof(s));
        secp256k1_ge_from_storage(ge, &s);
    } else {
        /* Otherwise, fall back to 32-byte big endian for X and Y.
         */
        secp256k1_fe x, y;
        secp256k1_fe_set_b32(&x, pubkey->data);
        secp256k1_fe_set_b32(&y, pubkey->data + 32);
        secp256k1_ge_set_xy(ge, &x, &y);
    }
    ARG_CHECK(!secp256k1_fe_is_zero(&ge->x));
    return 1;
}

static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) {
    if (sizeof(secp256k1_ge_storage) == 64) {
        secp256k1_ge_storage s;
        secp256k1_ge_to_storage(&s, ge);
        memcpy(&pubkey->data[0], &s, sizeof(s));
    } else {
        VERIFY_CHECK(!secp256k1_ge_is_infinity(ge));
        secp256k1_fe_normalize_var(&ge->x);
        secp256k1_fe_normalize_var(&ge->y);
        secp256k1_fe_get_b32(pubkey->data, &ge->x);
        secp256k1_fe_get_b32(pubkey->data + 32, &ge->y);
    }
}

int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
    secp256k1_ge Q;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(pubkey != NULL);
    memset(pubkey, 0, sizeof(*pubkey));
    ARG_CHECK(input != NULL);
    if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
        return 0;
    }
    secp256k1_pubkey_save(pubkey, &Q);
    secp256k1_ge_clear(&Q);
    return 1;
}

int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) {
    secp256k1_ge Q;
    size_t len;
    int ret = 0;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(outputlen != NULL);
    ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33 : 65));
    len = *outputlen;
    *outputlen = 0;
    ARG_CHECK(output != NULL);
    memset(output, 0, len);
    ARG_CHECK(pubkey != NULL);
    ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION);
    if (secp256k1_pubkey_load(ctx, &Q, pubkey)) {
        ret = secp256k1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION);
        if (ret) {
            *outputlen = len;
        }
    }
    return ret;
}

static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) {
    (void)ctx;
    if (sizeof(secp256k1_scalar) == 32) {
        /* When the secp256k1_scalar type is exactly 32 byte, use its
         * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
         * Note that secp256k1_ecdsa_signature_save must use the same representation.
         */
        memcpy(r, &sig->data[0], 32);
        memcpy(s, &sig->data[32], 32);
    } else {
        secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
        secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
    }
}

static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) {
    if (sizeof(secp256k1_scalar) == 32) {
        memcpy(&sig->data[0], r, 32);
        memcpy(&sig->data[32], s, 32);
    } else {
        secp256k1_scalar_get_b32(&sig->data[0], r);
        secp256k1_scalar_get_b32(&sig->data[32], s);
    }
}

int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
    secp256k1_scalar r, s;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(sig != NULL);
    ARG_CHECK(input != NULL);

    if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) {
        secp256k1_ecdsa_signature_save(sig, &r, &s);
        return 1;
    } else {
        memset(sig, 0, sizeof(*sig));
        return 0;
    }
}

int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input64) {
    secp256k1_scalar r, s;
    int ret = 1;
    int overflow = 0;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(sig != NULL);
    ARG_CHECK(input64 != NULL);

    secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
    ret &= !overflow;
    secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
    ret &= !overflow;
    if (ret) {
        secp256k1_ecdsa_signature_save(sig, &r, &s);
    } else {
        memset(sig, 0, sizeof(*sig));
    }
    return ret;
}

int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) {
    secp256k1_scalar r, s;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(output != NULL);
    ARG_CHECK(outputlen != NULL);
    ARG_CHECK(sig != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s);
}

int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) {
    secp256k1_scalar r, s;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(output64 != NULL);
    ARG_CHECK(sig != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    secp256k1_scalar_get_b32(&output64[0], &r);
    secp256k1_scalar_get_b32(&output64[32], &s);
    return 1;
}

int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) {
    secp256k1_scalar r, s;
    int ret = 0;

    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(sigin != NULL);

    secp256k1_ecdsa_signature_load(ctx, &r, &s, sigin);
    ret = secp256k1_scalar_is_high(&s);
    if (sigout != NULL) {
        if (ret) {
            secp256k1_scalar_negate(&s, &s);
        }
        secp256k1_ecdsa_signature_save(sigout, &r, &s);
    }

    return ret;
}

int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
    secp256k1_ge q;
    secp256k1_scalar r, s;
    secp256k1_scalar m;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(sig != NULL);
    ARG_CHECK(pubkey != NULL);

    secp256k1_scalar_set_b32(&m, msg32, NULL);
    secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
    return (!secp256k1_scalar_is_high(&s) &&
            secp256k1_pubkey_load(ctx, &q, pubkey) &&
            secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
}

static SECP256K1_INLINE void buffer_append(unsigned char *buf, unsigned int *offset, const void *data, unsigned int len) {
    memcpy(buf + *offset, data, len);
    *offset += len;
}

static int nonce_function_rfc6979(
    unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32,
    const unsigned char *algo16, void *data, unsigned int counter
) {
    unsigned char keydata[112];
    unsigned int offset = 0;
    secp256k1_rfc6979_hmac_sha256 rng;
    unsigned int i;
    /* We feed a byte array to the PRNG as input, consisting of:
     * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d.
     * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data.
     * - optionally 16 extra bytes with the algorithm name.
     * Because the arguments have distinct fixed lengths it is not possible for
     * different argument mixtures to emulate each other and result in the same
     * nonces.
     */
    buffer_append(keydata, &offset, key32, 32);
    buffer_append(keydata, &offset, msg32, 32);
    if (data != NULL) {
        buffer_append(keydata, &offset, data, 32);
    }
    if (algo16 != NULL) {
        buffer_append(keydata, &offset, algo16, 16);
    }
    secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, offset);
    memset(keydata, 0, sizeof(keydata));
    for (i = 0; i <= counter; i++) {
        secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
    }
    secp256k1_rfc6979_hmac_sha256_finalize(&rng);
    return 1;
}

const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979;
const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979;

int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
    secp256k1_scalar r, s;
    secp256k1_scalar sec, non, msg;
    int ret = 0;
    int overflow = 0;
    const unsigned char secp256k1_ecdsa_der_algo16[17] = "ECDSA+DER ";
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
    ARG_CHECK(msg32 != NULL);
    ARG_CHECK(signature != NULL);
    ARG_CHECK(seckey != NULL);
    if (noncefp == NULL) {
        noncefp = secp256k1_nonce_function_default;
    }

    secp256k1_scalar_set_b32(&sec, seckey, &overflow);
    /* Fail if the secret key is invalid.
     */
    if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
        unsigned char nonce32[32];
        unsigned int count = 0;
        secp256k1_scalar_set_b32(&msg, msg32, NULL);
        while (1) {
            ret = noncefp(nonce32, msg32, seckey, secp256k1_ecdsa_der_algo16, (void*)noncedata, count);
            if (!ret) {
                break;
            }
            secp256k1_scalar_set_b32(&non, nonce32, &overflow);
            if (!overflow && !secp256k1_scalar_is_zero(&non)) {
                if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
                    break;
                }
            }
            count++;
        }
        memset(nonce32, 0, 32);
        secp256k1_scalar_clear(&msg);
        secp256k1_scalar_clear(&non);
        secp256k1_scalar_clear(&sec);
    }
    if (ret) {
        secp256k1_ecdsa_signature_save(signature, &r, &s);
    } else {
        memset(signature, 0, sizeof(*signature));
    }
    return ret;
}

int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) {
    secp256k1_scalar sec;
    int ret;
    int overflow;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(seckey != NULL);

    secp256k1_scalar_set_b32(&sec, seckey, &overflow);
    ret = !overflow && !secp256k1_scalar_is_zero(&sec);
    secp256k1_scalar_clear(&sec);
    return ret;
}

int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
    secp256k1_gej pj;
    secp256k1_ge p;
    secp256k1_scalar sec;
    int overflow;
    int ret = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(pubkey != NULL);
    memset(pubkey, 0, sizeof(*pubkey));
    ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
    ARG_CHECK(seckey != NULL);

    secp256k1_scalar_set_b32(&sec, seckey, &overflow);
-    ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec));
+    ret = !overflow && !secp256k1_scalar_is_zero(&sec);
    if (ret) {
        secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
        secp256k1_ge_set_gej(&p, &pj);
        secp256k1_pubkey_save(pubkey, &p);
    }
    secp256k1_scalar_clear(&sec);
    return ret;
}

int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *seckey) {
    secp256k1_scalar sec;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(seckey != NULL);

    secp256k1_scalar_set_b32(&sec, seckey, NULL);
    secp256k1_scalar_negate(&sec, &sec);
    secp256k1_scalar_get_b32(seckey, &sec);

    secp256k1_scalar_clear(&sec);
    return 1;
}

int secp256k1_ec_pubkey_negate(const secp256k1_context* ctx, secp256k1_pubkey *pubkey) {
    int ret = 0;
    secp256k1_ge p;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(pubkey != NULL);

    ret = secp256k1_pubkey_load(ctx, &p, pubkey);
    memset(pubkey, 0, sizeof(*pubkey));
    if (ret) {
        secp256k1_ge_neg(&p, &p);
        secp256k1_pubkey_save(pubkey, &p);
    }
    return ret;
}

int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
    secp256k1_scalar term;
    secp256k1_scalar sec;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(seckey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&term, tweak, &overflow);
    secp256k1_scalar_set_b32(&sec, seckey, NULL);

    ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term);
    memset(seckey, 0, 32);
    if (ret) {
        secp256k1_scalar_get_b32(seckey, &sec);
    }

    secp256k1_scalar_clear(&sec);
    secp256k1_scalar_clear(&term);
    return ret;
}

int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
    secp256k1_ge p;
    secp256k1_scalar term;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(pubkey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&term, tweak, &overflow);
    ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
    memset(pubkey, 0, sizeof(*pubkey));
    if (ret) {
        if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) {
            secp256k1_pubkey_save(pubkey, &p);
        } else {
            ret = 0;
        }
    }

    return ret;
}

int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
    secp256k1_scalar factor;
    secp256k1_scalar sec;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(seckey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&factor, tweak, &overflow);
    secp256k1_scalar_set_b32(&sec, seckey, NULL);
    ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor);
    memset(seckey, 0, 32);
    if (ret) {
        secp256k1_scalar_get_b32(seckey, &sec);
    }

    secp256k1_scalar_clear(&sec);
    secp256k1_scalar_clear(&factor);
    return ret;
}

int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
    secp256k1_ge p;
    secp256k1_scalar factor;
    int ret = 0;
    int overflow = 0;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
    ARG_CHECK(pubkey != NULL);
    ARG_CHECK(tweak != NULL);

    secp256k1_scalar_set_b32(&factor, tweak, &overflow);
    ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
    memset(pubkey, 0, sizeof(*pubkey));
    if (ret) {
        if (secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) {
            secp256k1_pubkey_save(pubkey, &p);
        } else {
            ret = 0;
        }
    }

    return ret;
}

int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) {
    VERIFY_CHECK(ctx != NULL);
    if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
        secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
    }
    return 1;
}

int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, size_t n) {
    size_t i;
    secp256k1_gej Qj;
    secp256k1_ge Q;

    ARG_CHECK(pubnonce != NULL);
    memset(pubnonce, 0, sizeof(*pubnonce));
    ARG_CHECK(n >= 1);
    ARG_CHECK(pubnonces != NULL);

    secp256k1_gej_set_infinity(&Qj);

    for (i = 0; i < n; i++) {
        secp256k1_pubkey_load(ctx, &Q, pubnonces[i]);
        secp256k1_gej_add_ge(&Qj, &Qj, &Q);
    }
    if (secp256k1_gej_is_infinity(&Qj)) {
        return 0;
    }
    secp256k1_ge_set_gej(&Q, &Qj);
    secp256k1_pubkey_save(pubnonce, &Q);
    return 1;
}

#ifdef ENABLE_MODULE_ECDH
# include "modules/ecdh/main_impl.h"
#endif

#ifdef ENABLE_MODULE_MULTISET
# include "modules/multiset/main_impl.h"
#endif

#ifdef ENABLE_MODULE_RECOVERY
# include "modules/recovery/main_impl.h"
#endif

#ifdef ENABLE_MODULE_SCHNORR
# include "modules/schnorr/main_impl.h"
#endif