/**********************************************************************
 * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_ECMULT_CONST_IMPL_H
#define SECP256K1_ECMULT_CONST_IMPL_H

#include "scalar.h"
#include "group.h"
#include "ecmult_const.h"
#include "ecmult_impl.h"

/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
    int m = 0; \
    /* Extract the sign-bit for a constant time absolute-value. */ \
    int mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
    int abs_n = ((n) + mask) ^ mask; \
    int idx_n = abs_n >> 1; \
    secp256k1_fe neg_y; \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
    VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
    /* Unconditionally set r->x = (pre)[m].x and r->y = (pre)[m].y, because the entry is either \
     * the correct one or will be replaced in a later iteration; this ensures `r` is initialized. */ \
|
|
(r)->x = (pre)[m].x; \
|
|
(r)->y = (pre)[m].y; \
|
|
for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \
|
|
/* This loop is used to avoid secret data in array indices. See
|
|
* the comment in ecmult_gen_impl.h for rationale. */ \
|
|
secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
|
|
secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
|
|
} \
|
|
(r)->infinity = 0; \
|
|
secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
|
|
secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
|
|
} while(0)
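
/* Illustrative note (added commentary, not part of the original algorithm
 * description): the branchless absolute value above relies on two's complement
 * arithmetic. For example, with a 32-bit int n = -5:
 *
 *     mask  = n >> 31            = -1   (all bits set)
 *     abs_n = (n + mask) ^ mask  = (-6) ^ (-1) = 5
 *
 * while for n >= 0, mask is 0 and abs_n == n. The index idx_n = abs_n >> 1 maps
 * the odd digits 1, 3, 5, ... onto table slots 0, 1, 2, ... . The loop then
 * touches every table entry and selects the wanted one with secp256k1_fe_cmov,
 * so the memory access pattern does not depend on the secret index. */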

/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
 *  It has the following guarantees:
 *  - each wnaf[i] is an odd integer between -(1 << w) and (1 << w)
 *  - each wnaf[i] is nonzero
 *  - the number of words set is always WNAF_SIZE(w) + 1
 *
 *  Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
 *  Multiplications Secure against Side Channel Attacks`, Okeya and Takagi. M. Joye (Ed.)
 *  CT-RSA 2003, LNCS 2612, pp. 328-443, 2003. Springer-Verlag Berlin Heidelberg 2003
 *
 *  Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on p. 335
 */
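
/* Illustrative example (added commentary, using a small value rather than a real
 * 128/256-bit scalar): with w = 4, the odd value 21 can be written as
 * 1*2^4 + 5, with both digits odd and nonzero. If 21 was obtained from the even
 * input 20 by adding a skew of 1, the caller recovers 20*A as
 * (1*2^4 + 5)*A - 1*A, which is exactly the "- return_val" term above. */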
static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
    int global_sign;
    int skew = 0;
    int word = 0;

    /* 1 2 3 */
    int u_last;
    int u;

    int flip;
    int bit;
    secp256k1_scalar s;
    int not_neg_one;

    VERIFY_CHECK(w > 0);
    VERIFY_CHECK(size > 0);

    /* Note that we cannot handle even numbers by negating them to be odd, as is
     * done in other implementations, since if our scalars were specified to have
     * width < 256 for performance reasons, their negations would have width 256
     * and we'd lose any performance benefit. Instead, we use a technique from
     * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
     * or 2 (for odd) to the number we are encoding, returning a skew value indicating
     * this, and having the caller compensate after doing the multiplication.
     *
     * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
     * particular, to ensure that the outputs from the endomorphism-split fit into
     * 128 bits). If we negate, the parity of our number flips, inverting which of
     * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
     * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
     * we need to special-case it in this logic. */
    flip = secp256k1_scalar_is_high(scalar);
    /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
    bit = flip ^ !secp256k1_scalar_is_even(scalar);
    /* We check for negative one, since adding 2 to it will cause an overflow */
    secp256k1_scalar_negate(&s, scalar);
    not_neg_one = !secp256k1_scalar_is_one(&s);
    s = *scalar;
    secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
     * that we added two to it and flipped it. In fact for -1 these operations are
     * identical. We only flipped, but since skewing is required (in the sense that
     * the skew must be 1 or 2, never zero) and flipping is not, we need to change
     * our flags to claim that we only skewed. */
    global_sign = secp256k1_scalar_cond_negate(&s, flip);
    global_sign *= not_neg_one * 2 - 1;
    skew = 1 << bit;
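
    /* Illustrative example (added commentary, using a small value rather than a
     * real scalar): for a low, even input such as 6, flip = 0 and bit = 0, so
     * skew = 1 and s becomes 7, which is odd as the encoding below requires.
     * The caller later subtracts skew * A from its result, turning 7*A back
     * into 6*A. For a low, odd input, bit = 1 and skew = 2, which again leaves
     * s odd after the addition. */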

    /* 4 */
    u_last = secp256k1_scalar_shr_int(&s, w);
    do {
        int even;

        /* 4.1 4.4 */
        u = secp256k1_scalar_shr_int(&s, w);
        /* 4.2 */
        even = ((u & 1) == 0);
        /* In contrast to the original algorithm, u_last is always > 0 and
         * therefore we do not need to check its sign. In particular, it's easy
         * to see that u_last is never < 0 because u is never < 0. Moreover,
         * u_last is never = 0 because u is never even after a loop
         * iteration. The same holds analogously for the initial value of
         * u_last (in the first loop iteration). */
        VERIFY_CHECK(u_last > 0);
        VERIFY_CHECK((u_last & 1) == 1);
        u += even;
        u_last -= even * (1 << w);

        /* 4.3, adapted for global sign change */
        wnaf[word++] = u_last * global_sign;

        u_last = u;
    } while (word * w < size);
    wnaf[word] = u * global_sign;

    VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
    VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
    return skew;
}

static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size) {
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge tmpa;
    secp256k1_fe Z;

    int skew_1;
    secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
    int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
    int skew_lam;
    secp256k1_scalar q_1, q_lam;
    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];

    int i;

    /* build wnaf representation for q. */
    int rsize = size;
    if (size > 128) {
        rsize = 128;
        /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
        skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
        skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
    } else {
        skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
        skew_lam = 0;
    }
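
    /* Illustrative note (added commentary): when size > 128 the scalar q is
     * decomposed as q = q_1 + q_lam*lambda (mod the group order), so that
     * q*A = q_1*A + q_lam*(lambda*A). Computing lambda*A is cheap (see
     * secp256k1_ge_mul_lambda, which only multiplies the x coordinate by beta),
     * so two ~128-bit multiplications, sharing their doublings in the main loop
     * below, replace a single 256-bit multiplication. */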

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
     * that the Z coordinate was 1, use affine addition formulae, and correct
     * the Z coordinate of the result once at the end.
     */
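    /* Illustrative note (added commentary): a Jacobian point (X, Y, Z) denotes
     * the affine point (X/Z^2, Y/Z^3). Because every entry of pre_a ends up with
     * the same denominator Z, the entries can be treated as affine points while
     * accumulating into r, and the shared denominator is reintroduced by a
     * single multiplication of r->z by Z after the main loop. */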
    secp256k1_gej_set_ge(r, a);
    secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
        secp256k1_fe_normalize_weak(&pre_a[i].y);
    }
    if (size > 128) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
        }
    }

    /* first loop iteration (separated out so we can directly set r, rather
     * than having it start at infinity, get doubled several times, then have
     * its new value added to it) */
    i = wnaf_1[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
    secp256k1_gej_set_ge(r, &tmpa);
    if (size > 128) {
        i = wnaf_lam[WNAF_SIZE_BITS(rsize, WINDOW_A - 1)];
        VERIFY_CHECK(i != 0);
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
        secp256k1_gej_add_ge(r, r, &tmpa);
    }
    /* remaining loop iterations */
    for (i = WNAF_SIZE_BITS(rsize, WINDOW_A - 1) - 1; i >= 0; i--) {
        int n;
        int j;
        for (j = 0; j < WINDOW_A - 1; ++j) {
            secp256k1_gej_double(r, r);
        }

        n = wnaf_1[i];
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
        VERIFY_CHECK(n != 0);
        secp256k1_gej_add_ge(r, r, &tmpa);
        if (size > 128) {
            n = wnaf_lam[i];
            ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
            VERIFY_CHECK(n != 0);
            secp256k1_gej_add_ge(r, r, &tmpa);
        }
    }

    secp256k1_fe_mul(&r->z, &r->z, &Z);
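
    /* Illustrative note (added commentary): at this point r holds
     * (q_1 + skew_1)*A + (q_lam + skew_lam)*lambda*A (or (scalar + skew_1)*A
     * when size <= 128), because the wNAF encodings above represent the
     * skew-adjusted scalars. The block below subtracts skew_1*A and
     * skew_lam*lambda*A (i.e. A or 2A, selected in constant time), leaving
     * exactly scalar*A in r. */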

    {
        /* Correct for wNAF skew */
        secp256k1_ge correction = *a;
        secp256k1_ge_storage correction_1_stor;
        secp256k1_ge_storage correction_lam_stor;
        secp256k1_ge_storage a2_stor;
        secp256k1_gej tmpj;
        secp256k1_gej_set_ge(&tmpj, &correction);
        secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
        secp256k1_ge_set_gej(&correction, &tmpj);
        secp256k1_ge_to_storage(&correction_1_stor, a);
        if (size > 128) {
            secp256k1_ge_to_storage(&correction_lam_stor, a);
        }
        secp256k1_ge_to_storage(&a2_stor, &correction);

        /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
        secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
        if (size > 128) {
            secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
        }

        /* Apply the correction */
        secp256k1_ge_from_storage(&correction, &correction_1_stor);
        secp256k1_ge_neg(&correction, &correction);
        secp256k1_gej_add_ge(r, r, &correction);

        if (size > 128) {
            secp256k1_ge_from_storage(&correction, &correction_lam_stor);
            secp256k1_ge_neg(&correction, &correction);
            secp256k1_ge_mul_lambda(&correction, &correction);
            secp256k1_gej_add_ge(r, r, &correction);
        }
    }
}

#endif /* SECP256K1_ECMULT_CONST_IMPL_H */