Changeset View
Changeset View
Standalone View
Standalone View
src/secp256k1/src/scalar_impl.h
Show First 20 Lines • Show All 59 Lines • ▼ Show 20 Lines | |||||
#endif | #endif | ||||
/** Initialize a scalar from a 32-byte big-endian encoding and classify it as a
 *  secret key.
 *
 *  Out: r:   resulting scalar (written even when the return value is 0).
 *  In:  bin: pointer to 32 bytes to parse.
 *  Returns 1 iff set_b32 reported no overflow (value below the group order)
 *  and the resulting scalar is nonzero; 0 otherwise.
 */
static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) {
    int overflow;
    secp256k1_scalar_set_b32(r, bin, &overflow);
    /* Bitwise & (not &&): both conditions are always evaluated, so there is
     * no secret-dependent short-circuit branch. */
    return (!overflow) & (!secp256k1_scalar_is_zero(r));
}
/** Compute the multiplicative inverse of a scalar modulo the group order.
 *  The production path is constant time: inversion via exponentiation
 *  (Fermat: x^(n-2) = x^(-1) mod n for the prime group order n), evaluated
 *  with a fixed addition chain of squarings and multiplications. */
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(EXHAUSTIVE_TEST_ORDER)
    /* Exhaustive-test build: a scalar is a small integer modulo the tiny
     * test order, so simply search for the inverse by brute force. */
    int i;
    *r = 0;
    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
        if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
            *r = i;
    /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
     * have a composite group order; fix it in exhaustive_tests.c). */
    VERIFY_CHECK(*r != 0);
}
#else
    secp256k1_scalar *t;
    int i;
    /* First compute xN as x ^ (2^N - 1) for some values of N,
     * and uM as x ^ M for some values of M. */
    secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
    secp256k1_scalar u2, u5, u9, u11, u13;
    /* Small odd powers: u2 = x^2, x2 = x^3, u5 = x^5, x3 = x^7,
     * u9 = x^9, u11 = x^11, u13 = x^13. */
    secp256k1_scalar_sqr(&u2, x);
    secp256k1_scalar_mul(&x2, &u2, x);
    secp256k1_scalar_mul(&u5, &u2, &x2);
    secp256k1_scalar_mul(&x3, &u5, &u2);
    secp256k1_scalar_mul(&u9, &x3, &u2);
    secp256k1_scalar_mul(&u11, &u9, &u2);
    secp256k1_scalar_mul(&u13, &u11, &u2);
    /* x6 = x^(2^6 - 1): square u13 twice (x^52), multiply by u11. */
    secp256k1_scalar_sqr(&x6, &u13);
    secp256k1_scalar_sqr(&x6, &x6);
    secp256k1_scalar_mul(&x6, &x6, &u11);
    /* x8 = x^(2^8 - 1): square x6 twice, multiply by x2. */
    secp256k1_scalar_sqr(&x8, &x6);
    secp256k1_scalar_sqr(&x8, &x8);
    secp256k1_scalar_mul(&x8, &x8, &x2);
    /* x14 = x^(2^14 - 1): 6 squarings of x8, multiply by x6. */
    secp256k1_scalar_sqr(&x14, &x8);
    for (i = 0; i < 5; i++) {
        secp256k1_scalar_sqr(&x14, &x14);
    }
    secp256k1_scalar_mul(&x14, &x14, &x6);
    /* x28 = x^(2^28 - 1): 14 squarings of x14, multiply by x14. */
    secp256k1_scalar_sqr(&x28, &x14);
    for (i = 0; i < 13; i++) {
        secp256k1_scalar_sqr(&x28, &x28);
    }
    secp256k1_scalar_mul(&x28, &x28, &x14);
    /* x56 = x^(2^56 - 1): 28 squarings of x28, multiply by x28. */
    secp256k1_scalar_sqr(&x56, &x28);
    for (i = 0; i < 27; i++) {
        secp256k1_scalar_sqr(&x56, &x56);
    }
    secp256k1_scalar_mul(&x56, &x56, &x28);
    /* x112 = x^(2^112 - 1): 56 squarings of x56, multiply by x56. */
    secp256k1_scalar_sqr(&x112, &x56);
    for (i = 0; i < 55; i++) {
        secp256k1_scalar_sqr(&x112, &x112);
    }
    secp256k1_scalar_mul(&x112, &x112, &x56);
    /* x126 = x^(2^126 - 1): 14 squarings of x112, multiply by x14. */
    secp256k1_scalar_sqr(&x126, &x112);
    for (i = 0; i < 13; i++) {
        secp256k1_scalar_sqr(&x126, &x126);
    }
    secp256k1_scalar_mul(&x126, &x126, &x14);
    /* Then accumulate the final result (t starts at x126). */
    /* Each step below squares k times and multiplies by a precomputed power;
     * the trailing comment gives the exponent bits appended by that step
     * (one leading 0 bit per extra squaring beyond the comment's length). */
    t = &x126;
    for (i = 0; i < 3; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 5; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 3; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 10; i++) { /* 0000000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 9; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 5; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x2); /* 11 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 10; i++) { /* 000000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 00000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, x); /* 1 */
    for (i = 0; i < 8; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(r, t, &x6); /* 111111 */
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { | |||||
return !(a->d[0] & 1); | |||||
} | |||||
#endif | |||||
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { | |||||
#if defined(USE_SCALAR_INV_BUILTIN) | |||||
secp256k1_scalar_inverse(r, x); | |||||
#elif defined(USE_SCALAR_INV_NUM) | |||||
unsigned char b[32]; | |||||
secp256k1_num n, m; | |||||
secp256k1_scalar t = *x; | |||||
secp256k1_scalar_get_b32(b, &t); | |||||
secp256k1_num_set_bin(&n, b, 32); | |||||
secp256k1_scalar_order_get_num(&m); | |||||
secp256k1_num_mod_inverse(&n, &n, &m); | |||||
secp256k1_num_get_bin(b, 32, &n); | |||||
secp256k1_scalar_set_b32(r, b, NULL); | |||||
/* Verify that the inverse was computed correctly, without GMP code. */ | |||||
secp256k1_scalar_mul(&t, &t, r); | |||||
CHECK(secp256k1_scalar_is_one(&t)); | |||||
#else | |||||
#error "Please select scalar inverse implementation" | |||||
#endif | |||||
} | |||||
/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
#  if EXHAUSTIVE_TEST_ORDER == 13
#    define EXHAUSTIVE_TEST_LAMBDA 9
#  elif EXHAUSTIVE_TEST_ORDER == 199
#    define EXHAUSTIVE_TEST_LAMBDA 92
#  else
#    error No known lambda for the specified exhaustive test group order.
▲ Show 20 Lines • Show All 250 Lines • Show Last 20 Lines |