Be stricter with side effects in VERIFY
Adds a rule to CONTRIBUTING.md and makes the code adhere to it.
real-or-random committed Jan 17, 2024
1 parent e4af41c commit a24dd53
Showing 10 changed files with 92 additions and 76 deletions.
4 changes: 4 additions & 0 deletions CONTRIBUTING.md
@@ -49,6 +49,10 @@ In addition, libsecp256k1 tries to maintain the following coding conventions:
* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
* When a test binary is compiled, the `VERIFY` macro will be defined.
* Use `VERIFY_CHECK(cond)` for assertions. These will only be enabled if `VERIFY` is defined.
* If you need additional lines of code to prepare the call to `VERIFY_CHECK`, then wrap them and the call in `#ifdef VERIFY ... #endif`. Start a new block (see style conventions below) inside the `#ifdef VERIFY` if appropriate, and suffix `VERIFY`-only variables with `_ver`.
* In any `VERIFY` code, avoid side effects on variables used in non-`VERIFY` code. Note that `VERIFY_CHECK(cond)` is a no-op if `VERIFY` is not defined, so also avoid side effects in `cond` (see the sketch after this list for an illustration).
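
A minimal, self-contained sketch of these conventions (illustrative only; `checked_double`, `recomputed_ver`, and the `assert`-based `VERIFY_CHECK` stub are placeholders, not libsecp256k1 code):

```c
#include <assert.h>

#ifdef VERIFY
#define VERIFY_CHECK(cond) assert(cond)  /* active in test builds, where VERIFY is defined */
#else
#define VERIFY_CHECK(cond)               /* no-op: cond is discarded, so it must be side-effect free */
#endif

static int checked_double(int x) {
    int result = 2 * x;

    /* A bare assertion needs no preparation code and no #ifdef:
     * VERIFY_CHECK compiles away when VERIFY is not defined. */
    VERIFY_CHECK(result % 2 == 0);

#ifdef VERIFY
    {
        /* Preparation code lives in its own block inside #ifdef VERIFY,
         * and VERIFY-only variables carry the _ver suffix. Neither x nor
         * result is modified here, so non-VERIFY behavior is unchanged. */
        int recomputed_ver = x + x;
        VERIFY_CHECK(recomputed_ver == result);
    }
#endif

    return result;
}

int main(void) {
    return checked_double(21) == 42 ? 0 : 1;
}
```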

#### Style conventions

11 changes: 7 additions & 4 deletions src/ecmult_const_impl.h
@@ -212,10 +212,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
secp256k1_scalar_add(&v2, &v2, &S_OFFSET);

#ifdef VERIFY
/* Verify that v1 and v2 are in range [0, 2^129-1]. */
for (i = 129; i < 256; ++i) {
VERIFY_CHECK(secp256k1_scalar_get_bits(&v1, i, 1) == 0);
VERIFY_CHECK(secp256k1_scalar_get_bits(&v2, i, 1) == 0);
{
int i_ver;
/* Verify that v1 and v2 are in range [0, 2^129-1]. */
for (i_ver = 129; i_ver < 256; ++i_ver) {
VERIFY_CHECK(secp256k1_scalar_get_bits(&v1, i_ver, 1) == 0);
VERIFY_CHECK(secp256k1_scalar_get_bits(&v2, i_ver, 1) == 0);
}
}
#endif

13 changes: 6 additions & 7 deletions src/ecmult_impl.h
@@ -202,15 +202,14 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a,

bit += now;
}
VERIFY_CHECK(carry == 0);

#ifdef VERIFY
{
int verify_bit = bit;

VERIFY_CHECK(carry == 0);

while (verify_bit < 256) {
VERIFY_CHECK(secp256k1_scalar_get_bits(&s, verify_bit, 1) == 0);
verify_bit++;
int bit_ver = bit;
while (bit_ver < 256) {
VERIFY_CHECK(secp256k1_scalar_get_bits(&s, bit_ver, 1) == 0);
bit_ver++;
}
}
#endif
11 changes: 7 additions & 4 deletions src/field_impl.h
@@ -132,10 +132,13 @@ static int secp256k1_fe_sqrt(secp256k1_fe * SECP256K1_RESTRICT r, const secp256k
ret = secp256k1_fe_equal(&t1, a);

#ifdef VERIFY
if (!ret) {
secp256k1_fe_negate(&t1, &t1, 1);
secp256k1_fe_normalize_var(&t1);
VERIFY_CHECK(secp256k1_fe_equal(&t1, a));
{
secp256k1_fe t1_ver;
if (!ret) {
secp256k1_fe_negate(&t1_ver, &t1, 1);
secp256k1_fe_normalize_var(&t1_ver);
VERIFY_CHECK(secp256k1_fe_equal(&t1_ver, a));
}
}
#endif
return ret;
30 changes: 21 additions & 9 deletions src/group_impl.h
@@ -198,8 +198,11 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
size_t i;
size_t last_i = SIZE_MAX;
#ifdef VERIFY
for (i = 0; i < len; i++) {
SECP256K1_GEJ_VERIFY(&a[i]);
{
size_t i_ver;
for (i_ver = 0; i_ver < len; i_ver++) {
SECP256K1_GEJ_VERIFY(&a[i_ver]);
}
}
#endif

@@ -240,8 +243,11 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
}

#ifdef VERIFY
for (i = 0; i < len; i++) {
SECP256K1_GE_VERIFY(&r[i]);
{
size_t i_ver;
for (i_ver = 0; i_ver < len; i_ver++) {
SECP256K1_GE_VERIFY(&r[i_ver]);
}
}
#endif
}
@@ -250,9 +256,12 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
size_t i;
secp256k1_fe zs;
#ifdef VERIFY
for (i = 0; i < len; i++) {
SECP256K1_GE_VERIFY(&a[i]);
SECP256K1_FE_VERIFY(&zr[i]);
{
size_t i_ver;
for (i_ver = 0; i_ver < len; i_ver++) {
SECP256K1_GE_VERIFY(&a[i_ver]);
SECP256K1_FE_VERIFY(&zr[i_ver]);
}
}
#endif

@@ -273,8 +282,11 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
}

#ifdef VERIFY
for (i = 0; i < len; i++) {
SECP256K1_GE_VERIFY(&a[i]);
{
size_t i_ver;
for (i_ver = 0; i_ver < len; i_ver++) {
SECP256K1_GE_VERIFY(&a[i_ver]);
}
}
#endif
}
20 changes: 11 additions & 9 deletions src/modinv32_impl.h
@@ -67,14 +67,16 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
volatile int32_t cond_add, cond_negate;

#ifdef VERIFY
/* Verify that all limbs are in range (-2^30,2^30). */
int i;
for (i = 0; i < 9; ++i) {
VERIFY_CHECK(r->v[i] >= -M30);
VERIFY_CHECK(r->v[i] <= M30);
{
/* Verify that all limbs are in range (-2^30,2^30). */
int i_ver;
for (i_ver = 0; i_ver < 9; ++i_ver) {
VERIFY_CHECK(r->v[i_ver] >= -M30);
VERIFY_CHECK(r->v[i_ver] <= M30);
}
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
}
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif

/* In a first step, add the modulus if the input is negative, and then negate if requested.
@@ -586,7 +588,7 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
secp256k1_modinv32_signed30 f = modinfo->modulus;
secp256k1_modinv32_signed30 g = *x;
#ifdef VERIFY
int i = 0;
int i_ver = 0;
#endif
int j, len = 9;
int32_t eta = -1; /* eta = -delta; delta is initially 1 (faster for the variable-time code) */
@@ -631,7 +633,7 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
--len;
}

VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
VERIFY_CHECK(++i_ver < 25); /* We should never need more than 25*30 = 750 divsteps */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
20 changes: 11 additions & 9 deletions src/modinv64_impl.h
@@ -91,14 +91,16 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
volatile int64_t cond_add, cond_negate;

#ifdef VERIFY
/* Verify that all limbs are in range (-2^62,2^62). */
int i;
for (i = 0; i < 5; ++i) {
VERIFY_CHECK(r->v[i] >= -M62);
VERIFY_CHECK(r->v[i] <= M62);
{
/* Verify that all limbs are in range (-2^62,2^62). */
int i_ver;
for (i_ver = 0; i_ver < 5; ++i_ver) {
VERIFY_CHECK(r->v[i_ver] >= -M62);
VERIFY_CHECK(r->v[i_ver] <= M62);
}
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
}
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif

/* In a first step, add the modulus if the input is negative, and then negate if requested.
@@ -642,7 +644,7 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
secp256k1_modinv64_signed62 f = modinfo->modulus;
secp256k1_modinv64_signed62 g = *x;
#ifdef VERIFY
int i = 0;
int i_ver = 0;
#endif
int j, len = 5;
int64_t eta = -1; /* eta = -delta; delta is initially 1 */
@@ -686,7 +688,7 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
--len;
}

VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
VERIFY_CHECK(++i_ver < 12); /* We should never need more than 12*62 = 744 divsteps */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
24 changes: 12 additions & 12 deletions src/modules/ellswift/main_impl.h
@@ -187,7 +187,7 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
* - If (c & 5) = 5: return -w*(c4*u + v).
*/
secp256k1_fe x = *x_in, u = *u_in, g, v, s, m, r, q;
int ret;
int ret_ver;

secp256k1_fe_normalize_weak(&x);
secp256k1_fe_normalize_weak(&u);
@@ -266,11 +266,11 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
secp256k1_fe_mul(&q, &q, &s); /* q = s*(4*(u^3+7)+3*u^2*s) */
secp256k1_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */
if (!secp256k1_fe_is_square_var(&q)) return 0;
ret = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */
ret_ver = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */
#ifdef VERIFY
VERIFY_CHECK(ret);
VERIFY_CHECK(ret_ver);
#else
(void)ret;
(void)ret_ver;
#endif

/* If (c & 1) = 1 and r = 0, fail. */
@@ -287,8 +287,8 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
}

/* Let w = sqrt(s). */
ret = secp256k1_fe_sqrt(&m, &s); /* m = sqrt(s) = w */
VERIFY_CHECK(ret);
ret_ver = secp256k1_fe_sqrt(&m, &s); /* m = sqrt(s) = w */
VERIFY_CHECK(ret_ver);

/* Return logic. */
if ((c & 5) == 0 || (c & 5) == 5) {
@@ -311,7 +311,7 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256
secp256k1_sha256 hash = *hasher;
unsigned char buf4[4];
#ifdef VERIFY
size_t blocks = hash.bytes >> 6;
size_t blocks_ver = hash.bytes >> 6;
#endif
buf4[0] = cnt;
buf4[1] = cnt >> 8;
@@ -321,7 +321,7 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256
secp256k1_sha256_finalize(&hash, out32);

/* Writing and finalizing together should trigger exactly one SHA256 compression. */
VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1));
VERIFY_CHECK(((hash.bytes) >> 6) == (blocks_ver + 1));
}

/** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate.
@@ -407,17 +407,17 @@ int secp256k1_ellswift_encode(const secp256k1_context *ctx, unsigned char *ell64
secp256k1_fe t;
unsigned char p64[64] = {0};
size_t ser_size;
int ser_ret;
int ser_ret_ver;
secp256k1_sha256 hash;

/* Set up hasher state; the used RNG is H(pubkey || "\x00"*31 || rnd32 || cnt++), using
* BIP340 tagged hash with tag "secp256k1_ellswift_encode". */
secp256k1_ellswift_sha256_init_encode(&hash);
ser_ret = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1);
ser_ret_ver = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1);
#ifdef VERIFY
VERIFY_CHECK(ser_ret && ser_size == 33);
VERIFY_CHECK(ser_ret_ver && ser_size == 33);
#else
(void)ser_ret;
(void)ser_ret_ver;
#endif
secp256k1_sha256_write(&hash, p64, sizeof(p64));
secp256k1_sha256_write(&hash, rnd32, 32);
25 changes: 11 additions & 14 deletions src/scalar_4x64_impl.h
@@ -227,12 +227,15 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
r->d[3] = secp256k1_u128_to_u64(&t) + (a->d[3] >> 1) + (SECP256K1_N_H_3 & mask);
#ifdef VERIFY
/* The line above only computed the bottom 64 bits of r->d[3]; redo the computation
* in full 128 bits to make sure the top 64 bits are indeed zero. */
secp256k1_u128_accum_u64(&t, a->d[3] >> 1);
secp256k1_u128_accum_u64(&t, SECP256K1_N_H_3 & mask);
secp256k1_u128_rshift(&t, 64);
VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
{
secp256k1_uint128 t_ver = t; /* work on a copy so the VERIFY-only computation leaves t untouched */
/* The line above only computed the bottom 64 bits of r->d[3]; redo the computation
* in full 128 bits to make sure the top 64 bits are indeed zero. */
secp256k1_u128_accum_u64(&t_ver, a->d[3] >> 1);
secp256k1_u128_accum_u64(&t_ver, SECP256K1_N_H_3 & mask);
secp256k1_u128_rshift(&t_ver, 64);
VERIFY_CHECK(secp256k1_u128_to_u64(&t_ver) == 0);
}

SECP256K1_SCALAR_VERIFY(r);
#endif
@@ -948,32 +951,26 @@ static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_modinv64_signed62 s;
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
SECP256K1_SCALAR_VERIFY(x);

secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);

VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x));
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_modinv64_signed62 s;
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
SECP256K1_SCALAR_VERIFY(x);

secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);

VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x));
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
10 changes: 2 additions & 8 deletions src/scalar_8x32_impl.h
@@ -789,32 +789,26 @@ static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_scalar = {

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_modinv32_signed30 s;
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
SECP256K1_SCALAR_VERIFY(x);

secp256k1_scalar_to_signed30(&s, x);
secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed30(r, &s);

VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x));
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_modinv32_signed30 s;
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
SECP256K1_SCALAR_VERIFY(x);

secp256k1_scalar_to_signed30(&s, x);
secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed30(r, &s);

VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x));
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {