diff --git a/Cargo.lock b/Cargo.lock index 4e962c9049..5c1631a709 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3318,6 +3318,7 @@ version = "2.2.7" dependencies = [ "async-trait", "indexmap 2.2.2", + "lru", "parking_lot", "rand", "snarkvm", @@ -3526,7 +3527,7 @@ dependencies = [ [[package]] name = "snarkvm" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "anstyle", "anyhow", @@ -3557,7 +3558,7 @@ dependencies = [ [[package]] name = "snarkvm-algorithms" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -3587,7 +3588,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-account", "snarkvm-circuit-algorithms", @@ -3601,7 +3602,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-account" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-algorithms", "snarkvm-circuit-network", @@ -3612,7 +3613,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-algorithms" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-types", "snarkvm-console-algorithms", @@ -3622,7 +3623,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-collections" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-algorithms", "snarkvm-circuit-types", @@ -3632,7 +3633,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-environment" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "indexmap 2.2.2", "itertools 0.11.0", @@ -3650,12 +3651,12 @@ dependencies = [ [[package]] name = "snarkvm-circuit-environment-witness" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" [[package]] name = "snarkvm-circuit-network" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-algorithms", 
"snarkvm-circuit-collections", @@ -3666,7 +3667,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-program" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "paste", "snarkvm-circuit-account", @@ -3681,7 +3682,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-address", @@ -3696,7 +3697,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-address" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3709,7 +3710,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-boolean" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-console-types-boolean", @@ -3718,7 +3719,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-field" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3728,7 +3729,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-group" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3740,7 +3741,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-integers" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3752,7 +3753,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-scalar" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3763,7 +3764,7 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-string" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" 
dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3775,7 +3776,7 @@ dependencies = [ [[package]] name = "snarkvm-console" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-account", "snarkvm-console-algorithms", @@ -3788,7 +3789,7 @@ dependencies = [ [[package]] name = "snarkvm-console-account" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "bs58", "snarkvm-console-network", @@ -3799,7 +3800,7 @@ dependencies = [ [[package]] name = "snarkvm-console-algorithms" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "blake2s_simd", "smallvec", @@ -3812,7 +3813,7 @@ dependencies = [ [[package]] name = "snarkvm-console-collections" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "rayon", @@ -3823,7 +3824,7 @@ dependencies = [ [[package]] name = "snarkvm-console-network" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "anyhow", "indexmap 2.2.2", @@ -3846,7 +3847,7 @@ dependencies = [ [[package]] name = "snarkvm-console-network-environment" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "anyhow", "bech32", @@ -3864,7 +3865,7 @@ dependencies = [ [[package]] name = "snarkvm-console-program" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "enum_index", "enum_index_derive", @@ -3885,7 +3886,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-address", @@ -3900,7 +3901,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types-address" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3911,7 +3912,7 @@ dependencies = [ [[package]] name = 
"snarkvm-console-types-boolean" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", ] @@ -3919,7 +3920,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types-field" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3929,7 +3930,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types-group" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3940,7 +3941,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types-integers" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3951,7 +3952,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types-scalar" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3962,7 +3963,7 @@ dependencies = [ [[package]] name = "snarkvm-console-types-string" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3973,7 +3974,7 @@ dependencies = [ [[package]] name = "snarkvm-curves" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "rand", "rayon", @@ -3987,7 +3988,7 @@ dependencies = [ [[package]] name = "snarkvm-fields" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -4004,7 +4005,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -4029,7 +4030,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-authority" version = "0.16.19" -source = 
"git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "anyhow", "rand", @@ -4041,7 +4042,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-block" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "indexmap 2.2.2", "rayon", @@ -4060,7 +4061,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-coinbase" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -4080,7 +4081,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-committee" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "anyhow", "indexmap 2.2.2", @@ -4097,7 +4098,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-ledger-narwhal-batch-certificate", "snarkvm-ledger-narwhal-batch-header", @@ -4110,7 +4111,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-batch-certificate" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "indexmap 2.2.2", "rayon", @@ -4123,7 +4124,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-batch-header" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "indexmap 2.2.2", "serde_json", @@ -4135,7 +4136,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-data" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "bytes", "serde_json", @@ -4146,7 +4147,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-subdag" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "indexmap 2.2.2", "rayon", @@ -4160,7 +4161,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-transmission" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" 
dependencies = [ "bytes", "serde_json", @@ -4173,7 +4174,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-transmission-id" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "snarkvm-console", "snarkvm-ledger-coinbase", @@ -4182,7 +4183,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-query" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "async-trait", "reqwest", @@ -4195,7 +4196,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-store" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std-storage", "anyhow", @@ -4220,7 +4221,7 @@ dependencies = [ [[package]] name = "snarkvm-ledger-test-helpers" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "once_cell", "snarkvm-circuit", @@ -4235,7 +4236,7 @@ dependencies = [ [[package]] name = "snarkvm-metrics" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "metrics", "metrics-exporter-prometheus", @@ -4244,7 +4245,7 @@ dependencies = [ [[package]] name = "snarkvm-parameters" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -4269,7 +4270,7 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -4295,7 +4296,7 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer-process" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "colored", @@ -4318,7 +4319,7 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer-program" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "indexmap 2.2.2", "paste", @@ -4332,7 +4333,7 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer-snark" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = 
"git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "bincode", "once_cell", @@ -4345,7 +4346,7 @@ dependencies = [ [[package]] name = "snarkvm-utilities" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "aleo-std", "anyhow", @@ -4366,7 +4367,7 @@ dependencies = [ [[package]] name = "snarkvm-utilities-derives" version = "0.16.19" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=f3779c9#f3779c958b3516bca453c23c92caebe33d925b1d" +source = "git+https://github.com/AleoHQ/snarkVM.git?rev=94d1135#94d113519b90441172d4caab022b622535b3bb3a" dependencies = [ "proc-macro2", "quote 1.0.35", diff --git a/Cargo.toml b/Cargo.toml index ac78e1d9cf..c22bb240df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,6 @@ name = "snarkos" path = "snarkos/main.rs" [features] -jemalloc = [ "tikv-jemallocator" ] metrics = [ "snarkos-node-metrics", "snarkos-node/metrics" ] [dependencies.anyhow] @@ -120,9 +119,8 @@ version = "=2.2.7" path = "./node/tcp" version = "=2.2.7" -[dependencies.tikv-jemallocator] -version = "0.5" -optional = true +[target.'cfg(all(target_os = "linux", target_arch = "x86_64"))'.dependencies] +tikv-jemallocator = "0.5" [dev-dependencies.rusty-hook] version = "0.11.2" diff --git a/cli/src/commands/start.rs b/cli/src/commands/start.rs index fdaec4ed58..281c26e794 100644 --- a/cli/src/commands/start.rs +++ b/cli/src/commands/start.rs @@ -248,7 +248,7 @@ impl Start { let _ = PrivateKey::::new(&mut rng)?; } let private_key = PrivateKey::::new(&mut rng)?; - println!("🔑 Your development private key for node {dev} is {}\n", private_key.to_string().bold()); + println!("🔑 Your development private key for node {dev} is {}.\n", private_key.to_string().bold()); private_key }) } @@ -411,7 +411,7 @@ impl Start { // If the display is not enabled, render the welcome message. if self.nodisplay { // Print the Aleo address. - println!("🪪 Your Aleo address is {}.\n", account.address().to_string().bold()); + println!("👛 Your Aleo address is {}.\n", account.address().to_string().bold()); // Print the node type and network. println!( "🧭 Starting {} on {} {} at {}.\n", @@ -465,23 +465,12 @@ impl Start { fn runtime() -> Runtime { // Retrieve the number of cores. let num_cores = num_cpus::get(); - // Determine the number of main cores. - let main_cores = match num_cores { - // Insufficient - 0..=3 => { - eprintln!("The number of cores is insufficient, at least 4 are needed."); - std::process::exit(1); - } - // Efficiency mode - 4..=8 => 2, - // Standard mode - 9..=16 => 8, - // Performance mode - _ => 16, - }; + // Initialize the number of tokio worker threads, max tokio blocking threads, and rayon cores. + // Note: We intentionally set the number of tokio worker threads and number of rayon cores to be + // more than the number of physical cores, because the node is expected to be I/O-bound. let (num_tokio_worker_threads, max_tokio_blocking_threads, num_rayon_cores_global) = - { (num_cores.min(main_cores), 512, num_cores.saturating_sub(main_cores).max(1)) }; + (2 * num_cores, 512, num_cores); // Initialize the parallelization parameters. 
rayon::ThreadPoolBuilder::new() diff --git a/cli/src/helpers/mod.rs b/cli/src/helpers/mod.rs index 6cabe254b2..de3838b1ad 100644 --- a/cli/src/helpers/mod.rs +++ b/cli/src/helpers/mod.rs @@ -41,8 +41,8 @@ pub fn check_open_files_limit(minimum: u64) { // Warn about too low limit. let warning = [ format!("⚠️ The open files limit ({soft_limit}) for this process is lower than recommended."), - format!("⚠️ To ensure correct behavior of the node, please raise it to at least {minimum}."), - "⚠️ See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), + format!(" • To ensure correct behavior of the node, please raise it to at least {minimum}."), + " • See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), ] .join("\n") .yellow() @@ -54,8 +54,8 @@ pub fn check_open_files_limit(minimum: u64) { // Warn about unknown limit. let warning = [ format!("⚠️ Unable to check the open files limit for this process due to {err}."), - format!("⚠️ To ensure correct behavior of the node, please ensure it is at least {minimum}."), - "⚠️ See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), + format!(" • To ensure correct behavior of the node, please ensure it is at least {minimum}."), + " • See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), ] .join("\n") .yellow() diff --git a/node/bft/ledger-service/Cargo.toml b/node/bft/ledger-service/Cargo.toml index 8a37cdb97e..40d0ca87b8 100644 --- a/node/bft/ledger-service/Cargo.toml +++ b/node/bft/ledger-service/Cargo.toml @@ -18,7 +18,7 @@ edition = "2021" [features] default = [ ] -ledger = [ "rand", "tokio", "tracing" ] +ledger = [ "parking_lot", "rand", "tokio", "tracing" ] ledger-write = [ ] mock = [ "parking_lot", "tracing" ] prover = [ ] @@ -32,6 +32,9 @@ version = "0.1" version = "2.1" features = [ "serde", "rayon" ] +[dependencies.lru] +version = "0.12" + [dependencies.parking_lot] version = "0.12" optional = true diff --git a/node/bft/ledger-service/src/ledger.rs b/node/bft/ledger-service/src/ledger.rs index 1f42f909b0..211ea55332 100644 --- a/node/bft/ledger-service/src/ledger.rs +++ b/node/bft/ledger-service/src/ledger.rs @@ -26,6 +26,8 @@ use snarkvm::{ }; use indexmap::IndexMap; +use lru::LruCache; +use parking_lot::Mutex; use std::{ fmt, ops::Range, @@ -35,10 +37,14 @@ use std::{ }, }; +/// The capacity of the LRU holding the recently queried committees. +const COMMITTEE_CACHE_SIZE: usize = 16; + /// A core ledger service. pub struct CoreLedgerService> { ledger: Ledger, coinbase_verifying_key: Arc>, + committee_cache: Arc>>>, shutdown: Arc, } @@ -46,7 +52,8 @@ impl> CoreLedgerService { /// Initializes a new core ledger service. pub fn new(ledger: Ledger, shutdown: Arc) -> Self { let coinbase_verifying_key = Arc::new(ledger.coinbase_puzzle().coinbase_verifying_key().clone()); - Self { ledger, coinbase_verifying_key, shutdown } + let committee_cache = Arc::new(Mutex::new(LruCache::new(COMMITTEE_CACHE_SIZE.try_into().unwrap()))); + Self { ledger, coinbase_verifying_key, committee_cache, shutdown } } } @@ -127,9 +134,19 @@ impl> LedgerService for CoreLedgerService< /// Returns the committee for the given round. /// If the given round is in the future, then the current committee is returned. fn get_committee_for_round(&self, round: u64) -> Result> { + // Check if the committee is already in the cache.
+ if let Some(committee) = self.committee_cache.lock().get(&round) { + return Ok(committee.clone()); + } + match self.ledger.get_committee_for_round(round)? { // Return the committee if it exists. - Some(committee) => Ok(committee), + Some(committee) => { + // Insert the committee into the cache. + self.committee_cache.lock().push(round, committee.clone()); + // Return the committee. + Ok(committee) + } // Return the current committee if the round is in the future. None => { // Retrieve the current committee. @@ -174,8 +191,8 @@ impl> LedgerService for CoreLedgerService< } } - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, transmission: &mut Transmission, @@ -185,6 +202,7 @@ impl> LedgerService for CoreLedgerService< (TransmissionID::Transaction(expected_transaction_id), Transmission::Transaction(transaction_data)) => { match transaction_data.clone().deserialize_blocking() { Ok(transaction) => { + // Ensure the transaction ID matches the expected transaction ID. if transaction.id() != expected_transaction_id { bail!( "Received mismatching transaction ID - expected {}, found {}", @@ -193,6 +211,11 @@ impl> LedgerService for CoreLedgerService< ); } + // Ensure the transaction is not a fee transaction. + if transaction.is_fee() { + bail!("Received a fee transaction in a transmission"); + } + // Update the transmission with the deserialized transaction. *transaction_data = Data::Object(transaction); } diff --git a/node/bft/ledger-service/src/mock.rs b/node/bft/ledger-service/src/mock.rs index c36679a40e..00236723ab 100644 --- a/node/bft/ledger-service/src/mock.rs +++ b/node/bft/ledger-service/src/mock.rs @@ -143,8 +143,8 @@ impl LedgerService for MockLedgerService { Ok(false) } - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, _transmission: &mut Transmission, diff --git a/node/bft/ledger-service/src/prover.rs b/node/bft/ledger-service/src/prover.rs index 6edfbafd4c..be3e181aaf 100644 --- a/node/bft/ledger-service/src/prover.rs +++ b/node/bft/ledger-service/src/prover.rs @@ -124,8 +124,8 @@ impl LedgerService for ProverLedgerService { bail!("Transmission '{transmission_id}' does not exist in prover") } - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, _transmission_id: TransmissionID, _transmission: &mut Transmission, diff --git a/node/bft/ledger-service/src/traits.rs b/node/bft/ledger-service/src/traits.rs index a97fdeb373..ff25ae0ca7 100644 --- a/node/bft/ledger-service/src/traits.rs +++ b/node/bft/ledger-service/src/traits.rs @@ -78,8 +78,8 @@ pub trait LedgerService: Debug + Send + Sync { /// Returns `true` if the ledger contains the given transmission ID. fn contains_transmission(&self, transmission_id: &TransmissionID) -> Result; - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. 
+ fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, transmission: &mut Transmission, diff --git a/node/bft/ledger-service/src/translucent.rs b/node/bft/ledger-service/src/translucent.rs index 34afa15667..09c0c1792c 100644 --- a/node/bft/ledger-service/src/translucent.rs +++ b/node/bft/ledger-service/src/translucent.rs @@ -135,7 +135,7 @@ impl> LedgerService for TranslucentLedgerS } /// Always succeeds. - fn ensure_transmission_id_matches( + fn ensure_transmission_is_well_formed( &self, _transmission_id: TransmissionID, _transmission: &mut Transmission, diff --git a/node/bft/src/bft.rs b/node/bft/src/bft.rs index 163e2c5bc7..65cce88743 100644 --- a/node/bft/src/bft.rs +++ b/node/bft/src/bft.rs @@ -254,6 +254,7 @@ impl BFT { // Update to the next round in storage. if let Err(e) = self.storage().increment_to_next_round(current_round) { warn!("BFT failed to increment to the next round from round {current_round} - {e}"); + return false; } // Update the timer for the leader certificate. self.leader_certificate_timer.store(now(), Ordering::SeqCst); @@ -483,16 +484,16 @@ impl BFT { } /* Proceeding to commit the leader. */ - info!("Proceeding to commit round {commit_round} with leader {leader}..."); + info!("Proceeding to commit round {commit_round} with leader '{}'", fmt_id(leader)); // Prepare the election certificate IDs. let election_certificate_ids = certificates.values().map(|c| c.id()).collect::>(); // Commit the leader certificate, and all previous leader certificates since the last committed round. - self.commit_leader_certificate::(leader_certificate, election_certificate_ids).await + self.commit_leader_certificate::(leader_certificate, election_certificate_ids).await } /// Commits the leader certificate, and all previous leader certificates since the last committed round. - async fn commit_leader_certificate( + async fn commit_leader_certificate( &self, leader_certificate: BatchCertificate, election_certificate_ids: IndexSet>, @@ -549,10 +550,6 @@ impl BFT { let mut transmissions = IndexMap::new(); // Start from the oldest leader certificate. for certificate in commit_subdag.values().flatten() { - // Update the DAG. - if IS_SYNCING { - self.dag.write().commit(certificate, self.storage().max_gc_rounds()); - } // Retrieve the transmissions. for transmission_id in certificate.transmission_ids() { // If the transmission already exists in the map, skip it. @@ -576,51 +573,49 @@ impl BFT { transmissions.insert(*transmission_id, transmission); } } - // If the node is not syncing, trigger consensus, as this will build a new block for the ledger. - if !IS_SYNCING { - // Construct the subdag. - let subdag = Subdag::from(commit_subdag.clone(), election_certificate_ids.clone())?; - // Retrieve the anchor round. - let anchor_round = subdag.anchor_round(); - // Retrieve the number of transmissions. - let num_transmissions = transmissions.len(); - // Retrieve metadata about the subdag. - let subdag_metadata = subdag.iter().map(|(round, c)| (*round, c.len())).collect::>(); - - // Ensure the subdag anchor round matches the leader round. - ensure!( - anchor_round == leader_round, - "BFT failed to commit - the subdag anchor round {anchor_round} does not match the leader round {leader_round}", - ); - - // Trigger consensus. - if let Some(consensus_sender) = self.consensus_sender.get() { - // Initialize a callback sender and receiver. - let (callback_sender, callback_receiver) = oneshot::channel(); - // Send the subdag and transmissions to consensus. 
- consensus_sender.tx_consensus_subdag.send((subdag, transmissions, callback_sender)).await?; - // Await the callback to continue. - match callback_receiver.await { - Ok(Ok(())) => (), // continue - Ok(Err(e)) => { - error!("BFT failed to advance the subdag for round {anchor_round} - {e}"); - return Ok(()); - } - Err(e) => { - error!("BFT failed to receive the callback for round {anchor_round} - {e}"); - return Ok(()); - } + // Trigger consensus, as this will build a new block for the ledger. + // Construct the subdag. + let subdag = Subdag::from(commit_subdag.clone(), election_certificate_ids.clone())?; + // Retrieve the anchor round. + let anchor_round = subdag.anchor_round(); + // Retrieve the number of transmissions. + let num_transmissions = transmissions.len(); + // Retrieve metadata about the subdag. + let subdag_metadata = subdag.iter().map(|(round, c)| (*round, c.len())).collect::>(); + + // Ensure the subdag anchor round matches the leader round. + ensure!( + anchor_round == leader_round, + "BFT failed to commit - the subdag anchor round {anchor_round} does not match the leader round {leader_round}", + ); + + // Trigger consensus. + if let Some(consensus_sender) = self.consensus_sender.get() { + // Initialize a callback sender and receiver. + let (callback_sender, callback_receiver) = oneshot::channel(); + // Send the subdag and transmissions to consensus. + consensus_sender.tx_consensus_subdag.send((subdag, transmissions, callback_sender)).await?; + // Await the callback to continue. + match callback_receiver.await { + Ok(Ok(())) => (), // continue + Ok(Err(e)) => { + error!("BFT failed to advance the subdag for round {anchor_round} - {e}"); + return Ok(()); + } + Err(e) => { + error!("BFT failed to receive the callback for round {anchor_round} - {e}"); + return Ok(()); } } + } - info!( - "\n\nCommitting a subdag from round {anchor_round} with {num_transmissions} transmissions: {subdag_metadata:?}\n" - ); - // Update the DAG, as the subdag was successfully included into a block. - let mut dag_write = self.dag.write(); - for certificate in commit_subdag.values().flatten() { - dag_write.commit(certificate, self.storage().max_gc_rounds()); - } + info!( + "\n\nCommitting a subdag from round {anchor_round} with {num_transmissions} transmissions: {subdag_metadata:?}\n" + ); + // Update the DAG, as the subdag was successfully included into a block. + let mut dag_write = self.dag.write(); + for certificate in commit_subdag.values().flatten() { + dag_write.commit(certificate, self.storage().max_gc_rounds()); } // Update the last election certificate IDs. // TODO (howardwu): This is currently writing the *latest* election certificate IDs, @@ -632,6 +627,10 @@ impl BFT { *last_election_certificate_ids = election_certificate_ids.clone(); } } + + // Perform garbage collection based on the latest committed leader round. + // self.storage().garbage_collect_certificates(latest_leader_round); + Ok(()) } @@ -668,6 +667,11 @@ impl BFT { if self.dag.read().is_recently_committed(previous_round, *previous_certificate_id) { continue; } + // If the previous certificate already exists in the ledger, continue. + if ALLOW_LEDGER_ACCESS && self.ledger().contains_certificate(previous_certificate_id).unwrap_or(false) { + continue; + } + // Retrieve the previous certificate. let previous_certificate = { // Start by retrieving the previous certificate from the DAG. 
@@ -678,28 +682,11 @@ impl BFT { None => match self.storage().get_certificate(*previous_certificate_id) { // If the previous certificate is found, return it. Some(previous_certificate) => previous_certificate, - // Otherwise, retrieve the previous certificate from the ledger. - None => { - if ALLOW_LEDGER_ACCESS { - match self.ledger().get_batch_certificate(previous_certificate_id) { - // If the previous certificate is found, return it. - Ok(previous_certificate) => previous_certificate, - // Otherwise, the previous certificate is missing, and throw an error. - Err(e) => { - bail!( - "Missing previous certificate {} for round {previous_round} - {e}", - fmt_id(previous_certificate_id) - ) - } - } - } else { - // Otherwise, the previous certificate is missing, and throw an error. - bail!( - "Missing previous certificate {} for round {previous_round}", - fmt_id(previous_certificate_id) - ) - } - } + // Otherwise, the previous certificate is missing, and throw an error. + None => bail!( + "Missing previous certificate {} for round {previous_round}", + fmt_id(previous_certificate_id) + ), }, } }; @@ -784,8 +771,8 @@ impl BFT { // Process the request to sync the BFT DAG at bootup. let self_ = self.clone(); self.spawn(async move { - while let Some((leader_certificates, certificates)) = rx_sync_bft_dag_at_bootup.recv().await { - self_.sync_bft_dag_at_bootup(leader_certificates, certificates).await; + while let Some(certificates) = rx_sync_bft_dag_at_bootup.recv().await { + self_.sync_bft_dag_at_bootup(certificates).await; } }); @@ -802,62 +789,19 @@ impl BFT { }); } - /// Syncs the BFT DAG with the given leader certificates and batch certificates. + /// Syncs the BFT DAG with the given batch certificates. These batch certificates **must** + /// already exist in the ledger. /// - /// This method starts by inserting all certificates (except the latest leader certificate) - /// into the DAG. Then, it commits all leader certificates (except the latest leader certificate). - /// Finally, it updates the DAG with the latest leader certificate. - async fn sync_bft_dag_at_bootup( - &self, - leader_certificates: Vec<(BatchCertificate, IndexSet>)>, - certificates: Vec>, - ) { - // Split the leader certificates into past leader certificates, the latest leader certificate, and the election certificate IDs. - let (past_leader_certificates, leader_certificate, election_certificate_ids) = { - // Compute the penultimate index. - let index = leader_certificates.len().saturating_sub(1); - // Split the leader certificates. - let (past, latest) = leader_certificates.split_at(index); - debug_assert!(latest.len() == 1, "There should only be one latest leader certificate"); - // Retrieve the latest leader certificate. - match latest.first() { - Some((leader_certificate, election_certificate_ids)) => { - (past, leader_certificate.clone(), election_certificate_ids.clone()) - } - // If there is no latest leader certificate, return early. - None => return, - } - }; - { - // Acquire the BFT write lock. - let mut dag = self.dag.write(); - // Iterate over the certificates. - for certificate in certificates { - // If the certificate is not the latest leader certificate, insert it. - if leader_certificate.id() != certificate.id() { - // Insert the certificate into the DAG. - dag.insert(certificate); - } - } - - // Acquire the last election certificate IDs. - let mut last_election_certificate_ids = self.last_election_certificate_ids.write(); - // Iterate over the leader certificates. 
- for (leader_certificate, election_certificate_ids) in past_leader_certificates { - // Commit the leader certificate. - dag.commit(leader_certificate, self.storage().max_gc_rounds()); - // Update the last election certificate IDs. - // - // Note: Because we will be committing the latest leader certificate after this, - // technically we do not need to be updating the last election certificate IDs - // for intermediate leader certificates. However, this is a safety mechanic to ensure completeness. - *last_election_certificate_ids = election_certificate_ids.clone(); - } - } - // Commit the latest leader certificate. - if let Err(e) = self.commit_leader_certificate::(leader_certificate, election_certificate_ids).await - { - error!("BFT failed to update the DAG with the latest leader certificate - {e}"); + /// This method commits all the certificates into the DAG. + /// Note that there is no need to insert the certificates into the DAG, because these certificates + /// already exist in the ledger and therefore do not need to be re-ordered into future committed subdags. + async fn sync_bft_dag_at_bootup(&self, certificates: Vec>) { + // Acquire the BFT write lock. + let mut dag = self.dag.write(); + + // Commit all the certificates excluding the latest leader certificate. + for certificate in certificates { + dag.commit(&certificate, self.storage().max_gc_rounds()); } } @@ -888,6 +832,7 @@ mod tests { use snarkos_node_bft_ledger_service::MockLedgerService; use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ + console::account::{Address, PrivateKey}, ledger::{ committee::Committee, narwhal::batch_certificate::test_helpers::{sample_batch_certificate, sample_batch_certificate_for_round}, @@ -896,7 +841,7 @@ mod tests { }; use anyhow::Result; - use indexmap::IndexSet; + use indexmap::{IndexMap, IndexSet}; use std::sync::{atomic::Ordering, Arc}; type CurrentNetwork = snarkvm::console::network::Testnet3; @@ -1250,4 +1195,531 @@ mod tests { assert_eq!(result.unwrap_err().to_string(), error_msg); Ok(()) } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_bft_gc_on_commit() -> Result<()> { + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = 1; + let committee_round = 0; + let commit_round = 2; + let next_round = commit_round + 1; + + // Sample the certificates. + let (_, certificates) = snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_with_previous_certificates( + next_round, + rng, + ); + + // Initialize the committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + vec![ + certificates[0].author(), + certificates[1].author(), + certificates[2].author(), + certificates[3].author(), + ], + rng, + ); + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + + // Initialize the storage. + let transmissions = Arc::new(BFTMemoryService::new()); + let storage = Storage::new(ledger.clone(), transmissions, max_gc_rounds); + // Insert the certificates into the storage. + for certificate in certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + + // Get the leader certificate. + let leader = committee.get_leader(commit_round).unwrap(); + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader).unwrap(); + + // Initialize the BFT. 
+ let account = Account::new(rng)?; + let bft = BFT::new(account, storage.clone(), ledger, None, &[], None)?; + // Insert a mock DAG in the BFT. + *bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(commit_round); + + // Ensure that the `gc_round` has not been updated yet. + assert_eq!(bft.storage().gc_round(), committee_round.saturating_sub(max_gc_rounds)); + + // Insert the certificates into the BFT. + for certificate in certificates { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + + // Commit the leader certificate. + bft.commit_leader_certificate::(leader_certificate, Default::default()).await.unwrap(); + + // Increment the BFT to the next round. + let _next_round = bft.storage().increment_to_next_round(commit_round); + + // Ensure that the `gc_round` has been updated. + assert_eq!(bft.storage().gc_round(), next_round - max_gc_rounds); + + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_sync_bft_dag_at_bootup() -> Result<()> { + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = 1; + let committee_round = 0; + let commit_round = 2; + let current_round = commit_round + 1; + + // Sample the current certificate and previous certificates. + let (_, certificates) = snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_with_previous_certificates( + current_round, + rng, + ); + + // Initialize the committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + vec![ + certificates[0].author(), + certificates[1].author(), + certificates[2].author(), + certificates[3].author(), + ], + rng, + ); + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Insert the certificates into the storage. + for certificate in certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + + // Get the leader certificate. + let leader = committee.get_leader(commit_round).unwrap(); + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader).unwrap(); + + // Initialize the BFT. + let account = Account::new(rng)?; + let bft = BFT::new(account.clone(), storage, ledger.clone(), None, &[], None)?; + + // Insert a mock DAG in the BFT. + *bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(commit_round); + + // Insert the previous certificates into the BFT. + for certificate in certificates.clone() { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + + // Commit the leader certificate. + bft.commit_leader_certificate::(leader_certificate.clone(), Default::default()).await.unwrap(); + + // Simulate a bootup of the BFT. + + // Initialize a new instance of storage. + let storage_2 = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Initialize a new instance of BFT. + let bootup_bft = BFT::new(account, storage_2, ledger, None, &[], None)?; + + // Sync the BFT DAG at bootup. + bootup_bft.sync_bft_dag_at_bootup(certificates.clone()).await; + + // Check that the BFT starts from the same last committed round. 
+ assert_eq!(bft.dag.read().last_committed_round(), bootup_bft.dag.read().last_committed_round()); + + // Ensure that both BFTs have committed the leader certificate. + assert!(bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + assert!(bootup_bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + + // Check the state of the bootup BFT. + for certificate in certificates { + let certificate_round = certificate.round(); + let certificate_id = certificate.id(); + // Check that the bootup BFT has committed the certificates. + assert!(bootup_bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + // Check that the bootup BFT does not contain the certificates in its graph, because + // it should not need to order them again in subsequent subdags. + assert!(!bootup_bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id)); + } + + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_sync_bft_dag_at_bootup_shutdown() -> Result<()> { + /* + 1. Run one uninterrupted BFT on a set of certificates for 2 leader commits. + 2. Run a separate bootup BFT that syncs with a set of pre shutdown certificates, and then commits a second leader normally over a set of post shutdown certificates. + 3. Observe that the uninterrupted BFT and the bootup BFT end in the same state. + */ + + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = crate::MAX_GC_ROUNDS; + let committee_round = 0; + let commit_round = 2; + let current_round = commit_round + 1; + let next_round = current_round + 1; + + // Sample 5 rounds of batch certificates starting at the genesis round from a static set of 4 authors. + let (round_to_certificates_map, committee) = { + let private_keys = vec![ + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + ]; + let addresses = vec![ + Address::try_from(private_keys[0])?, + Address::try_from(private_keys[1])?, + Address::try_from(private_keys[2])?, + Address::try_from(private_keys[3])?, + ]; + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + addresses, + rng, + ); + // Initialize a mapping from the round number to the set of batch certificates in the round. + let mut round_to_certificates_map: IndexMap< + u64, + IndexSet>, + > = IndexMap::new(); + let mut previous_certificates = IndexSet::with_capacity(4); + // Initialize the genesis batch certificates. 
+ for _ in 0..4 { + previous_certificates.insert(sample_batch_certificate(rng)); + } + for round in 0..commit_round + 3 { + let mut current_certificates = IndexSet::new(); + let previous_certificate_ids: IndexSet<_> = if round == 0 || round == 1 { + IndexSet::new() + } else { + previous_certificates.iter().map(|c| c.id()).collect() + }; + let transmission_ids = + snarkvm::ledger::narwhal::transmission_id::test_helpers::sample_transmission_ids(rng) + .into_iter() + .collect::>(); + let timestamp = time::OffsetDateTime::now_utc().unix_timestamp(); + for (i, private_key_1) in private_keys.iter().enumerate() { + let batch_header = snarkvm::ledger::narwhal::BatchHeader::new( + private_key_1, + round, + timestamp, + transmission_ids.clone(), + previous_certificate_ids.clone(), + Default::default(), + rng, + ) + .unwrap(); + let mut signatures = IndexSet::with_capacity(4); + for (j, private_key_2) in private_keys.iter().enumerate() { + if i != j { + signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap()); + } + } + let certificate = + snarkvm::ledger::narwhal::BatchCertificate::from(batch_header, signatures).unwrap(); + current_certificates.insert(certificate); + } + // Update the mapping. + round_to_certificates_map.insert(round, current_certificates.clone()); + previous_certificates = current_certificates.clone(); + } + (round_to_certificates_map, committee) + }; + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Get the leaders for the next 2 commit rounds. + let leader = committee.get_leader(commit_round).unwrap(); + let next_leader = committee.get_leader(next_round).unwrap(); + // Insert the pre shutdown certificates into the storage. + let mut pre_shutdown_certificates: Vec> = Vec::new(); + for i in 1..=commit_round { + let certificates = (*round_to_certificates_map.get(&i).unwrap()).clone(); + if i == commit_round { + // Only insert the leader certificate for the commit round. + let leader_certificate = certificates.iter().find(|certificate| certificate.author() == leader); + if let Some(c) = leader_certificate { + pre_shutdown_certificates.push(c.clone()); + } + continue; + } + pre_shutdown_certificates.extend(certificates); + } + for certificate in pre_shutdown_certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Insert the post shutdown certificates into the storage. + let mut post_shutdown_certificates: Vec> = + Vec::new(); + for j in commit_round..=commit_round + 2 { + let certificate = (*round_to_certificates_map.get(&j).unwrap()).clone(); + post_shutdown_certificates.extend(certificate); + } + for certificate in post_shutdown_certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Get the leader certificates. + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader).unwrap(); + let next_leader_certificate = storage.get_certificate_for_round_with_author(next_round, next_leader).unwrap(); + + // Initialize the BFT without bootup. + let account = Account::new(rng)?; + let bft = BFT::new(account.clone(), storage, ledger.clone(), None, &[], None)?; + + // Insert a mock DAG in the BFT without bootup. 
+ *bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(0); + + // Insert the certificates into the BFT without bootup. + for certificate in pre_shutdown_certificates.clone() { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + + // Insert the post shutdown certificates into the BFT without bootup. + for certificate in post_shutdown_certificates.clone() { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + // Commit the second leader certificate. + let commit_subdag = bft.order_dag_with_dfs::(next_leader_certificate.clone()).unwrap(); + let commit_subdag_metadata = commit_subdag.iter().map(|(round, c)| (*round, c.len())).collect::>(); + bft.commit_leader_certificate::(next_leader_certificate.clone(), Default::default()).await.unwrap(); + + // Simulate a bootup of the BFT. + + // Initialize a new instance of storage. + let bootup_storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + + // Initialize a new instance of BFT with bootup. + let bootup_bft = BFT::new(account, bootup_storage.clone(), ledger.clone(), None, &[], None)?; + + // Sync the BFT DAG at bootup. + bootup_bft.sync_bft_dag_at_bootup(pre_shutdown_certificates.clone()).await; + + // Insert the post shutdown certificates to the storage and BFT with bootup. + for certificate in post_shutdown_certificates.iter() { + bootup_bft.storage().testing_only_insert_certificate_testing_only(certificate.clone()); + } + for certificate in post_shutdown_certificates.clone() { + assert!(bootup_bft.update_dag::(certificate).await.is_ok()); + } + // Commit the second leader certificate. + let commit_subdag_bootup = bootup_bft.order_dag_with_dfs::(next_leader_certificate.clone()).unwrap(); + let commit_subdag_metadata_bootup = + commit_subdag_bootup.iter().map(|(round, c)| (*round, c.len())).collect::>(); + let committed_certificates_bootup = commit_subdag_bootup.values().flatten(); + bootup_bft + .commit_leader_certificate::(next_leader_certificate.clone(), Default::default()) + .await + .unwrap(); + + // Check that the final state of both BFTs is the same. + + // Check that both BFTs start from the same last committed round. + assert_eq!(bft.dag.read().last_committed_round(), bootup_bft.dag.read().last_committed_round()); + + // Ensure that both BFTs have committed the leader certificates. + assert!(bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + assert!(bft.dag.read().is_recently_committed(next_leader_certificate.round(), next_leader_certificate.id())); + assert!(bootup_bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + assert!( + bootup_bft.dag.read().is_recently_committed(next_leader_certificate.round(), next_leader_certificate.id()) + ); + + // Check that the bootup BFT has committed the pre shutdown certificates. + for certificate in pre_shutdown_certificates.clone() { + let certificate_round = certificate.round(); + let certificate_id = certificate.id(); + // Check that both BFTs have committed the certificates. + assert!(bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + assert!(bootup_bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + // Check that the bootup BFT does not contain the certificates in its graph, because + // it should not need to order them again in subsequent subdags. 
+            assert!(!bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id));
+            assert!(!bootup_bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id));
+        }
+
+        // Check that the bootup BFT has committed the subdag stemming from the second leader certificate in consensus.
+        for certificate in committed_certificates_bootup.clone() {
+            let certificate_round = certificate.round();
+            let certificate_id = certificate.id();
+            // Check that both BFTs have committed the certificates.
+            assert!(bft.dag.read().is_recently_committed(certificate_round, certificate_id));
+            assert!(bootup_bft.dag.read().is_recently_committed(certificate_round, certificate_id));
+            // Check that the bootup BFT does not contain the certificates in its graph, because
+            // it should not need to order them again in subsequent subdags.
+            assert!(!bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id));
+            assert!(!bootup_bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id));
+        }
+
+        // Check that the commit subdag metadata for the second leader is the same for both BFTs.
+        assert_eq!(commit_subdag_metadata_bootup, commit_subdag_metadata);
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    #[tracing_test::traced_test]
+    async fn test_sync_bft_dag_at_bootup_dfs() -> Result<()> {
+        /*
+        1. Run a bootup BFT that syncs with a set of pre-shutdown certificates.
+        2. Add post-shutdown certificates to the bootup BFT.
+        3. Observe that in the commit subdag of the second leader certificate, there are no repeated vertices from the pre-shutdown certificates.
+        */
+
+        let rng = &mut TestRng::default();
+
+        // Initialize the round parameters.
+        let max_gc_rounds = crate::MAX_GC_ROUNDS;
+        let committee_round = 0;
+        let commit_round = 2;
+        let current_round = commit_round + 1;
+        let next_round = current_round + 1;
+
+        // Sample 5 rounds of batch certificates starting at the genesis round from a static set of 4 authors.
+        let (round_to_certificates_map, committee) = {
+            let private_keys = vec![
+                PrivateKey::new(rng).unwrap(),
+                PrivateKey::new(rng).unwrap(),
+                PrivateKey::new(rng).unwrap(),
+                PrivateKey::new(rng).unwrap(),
+            ];
+            let addresses = vec![
+                Address::try_from(private_keys[0])?,
+                Address::try_from(private_keys[1])?,
+                Address::try_from(private_keys[2])?,
+                Address::try_from(private_keys[3])?,
+            ];
+            let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members(
+                committee_round,
+                addresses,
+                rng,
+            );
+            // Initialize a mapping from the round number to the set of batch certificates in the round.
+            let mut round_to_certificates_map: IndexMap<
+                u64,
+                IndexSet<BatchCertificate<CurrentNetwork>>,
+            > = IndexMap::new();
+            let mut previous_certificates = IndexSet::with_capacity(4);
+            // Initialize the genesis batch certificates.
+            for _ in 0..4 {
+                previous_certificates.insert(sample_batch_certificate(rng));
+            }
+            for round in 0..=commit_round + 2 {
+                let mut current_certificates = IndexSet::new();
+                let previous_certificate_ids: IndexSet<_> = if round == 0 || round == 1 {
+                    IndexSet::new()
+                } else {
+                    previous_certificates.iter().map(|c| c.id()).collect()
+                };
+                let transmission_ids =
+                    snarkvm::ledger::narwhal::transmission_id::test_helpers::sample_transmission_ids(rng)
+                        .into_iter()
+                        .collect::<IndexSet<_>>();
+                let timestamp = time::OffsetDateTime::now_utc().unix_timestamp();
+                for (i, private_key_1) in private_keys.iter().enumerate() {
+                    let batch_header = snarkvm::ledger::narwhal::BatchHeader::new(
+                        private_key_1,
+                        round,
+                        timestamp,
+                        transmission_ids.clone(),
+                        previous_certificate_ids.clone(),
+                        Default::default(),
+                        rng,
+                    )
+                    .unwrap();
+                    let mut signatures = IndexSet::with_capacity(4);
+                    for (j, private_key_2) in private_keys.iter().enumerate() {
+                        if i != j {
+                            signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap());
+                        }
+                    }
+                    let certificate =
+                        snarkvm::ledger::narwhal::BatchCertificate::from(batch_header, signatures).unwrap();
+                    current_certificates.insert(certificate);
+                }
+                // Update the mapping.
+                round_to_certificates_map.insert(round, current_certificates.clone());
+                previous_certificates = current_certificates.clone();
+            }
+            (round_to_certificates_map, committee)
+        };
+
+        // Initialize the ledger.
+        let ledger = Arc::new(MockLedgerService::new(committee.clone()));
+        // Initialize the storage.
+        let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds);
+        // Get the leaders for the next 2 commit rounds.
+        let leader = committee.get_leader(commit_round).unwrap();
+        let next_leader = committee.get_leader(next_round).unwrap();
+        // Insert the pre-shutdown certificates into the storage.
+        let mut pre_shutdown_certificates: Vec<BatchCertificate<CurrentNetwork>> = Vec::new();
+        for i in 1..=commit_round {
+            let certificates = (*round_to_certificates_map.get(&i).unwrap()).clone();
+            if i == commit_round {
+                // Only insert the leader certificate for the commit round.
+                let leader_certificate = certificates.iter().find(|certificate| certificate.author() == leader);
+                if let Some(c) = leader_certificate {
+                    pre_shutdown_certificates.push(c.clone());
+                }
+                continue;
+            }
+            pre_shutdown_certificates.extend(certificates);
+        }
+        for certificate in pre_shutdown_certificates.iter() {
+            storage.testing_only_insert_certificate_testing_only(certificate.clone());
+        }
+        // Initialize the bootup BFT.
+        let account = Account::new(rng)?;
+        let bootup_bft = BFT::new(account.clone(), storage.clone(), ledger.clone(), None, &[], None)?;
+        // Insert a mock DAG in the bootup BFT.
+        *bootup_bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(0);
+        // Sync the BFT DAG at bootup.
+        bootup_bft.sync_bft_dag_at_bootup(pre_shutdown_certificates.clone()).await;
+
+        // Insert the post-shutdown certificates into the storage.
+        let mut post_shutdown_certificates: Vec<BatchCertificate<CurrentNetwork>> =
+            Vec::new();
+        for j in commit_round..=commit_round + 2 {
+            let certificates = (*round_to_certificates_map.get(&j).unwrap()).clone();
+            post_shutdown_certificates.extend(certificates);
+        }
+        for certificate in post_shutdown_certificates.iter() {
+            storage.testing_only_insert_certificate_testing_only(certificate.clone());
+        }
+
+        // Insert the post-shutdown certificates into the DAG.
+        for certificate in post_shutdown_certificates.clone() {
+            assert!(bootup_bft.update_dag::<false>(certificate).await.is_ok());
+        }
+
+        // Get the next leader certificate to commit.
+        let next_leader_certificate = storage.get_certificate_for_round_with_author(next_round, next_leader).unwrap();
+        let commit_subdag = bootup_bft.order_dag_with_dfs::<false>(next_leader_certificate).unwrap();
+        let committed_certificates = commit_subdag.values().flatten();
+
+        // Check that none of the certificates synced at bootup appear in the subdag for the next commit round.
+        for pre_shutdown_certificate in pre_shutdown_certificates.clone() {
+            for committed_certificate in committed_certificates.clone() {
+                assert_ne!(pre_shutdown_certificate.id(), committed_certificate.id());
+            }
+        }
+        Ok(())
+    }
 }
diff --git a/node/bft/src/helpers/channels.rs b/node/bft/src/helpers/channels.rs
index 8881b1f0ca..182aa0a801 100644
--- a/node/bft/src/helpers/channels.rs
+++ b/node/bft/src/helpers/channels.rs
@@ -64,8 +64,7 @@ pub struct BFTSender<N: Network> {
     pub tx_last_election_certificate_ids: mpsc::Sender<oneshot::Sender<IndexSet<Field<N>>>>,
     pub tx_primary_round: mpsc::Sender<(u64, oneshot::Sender<bool>)>,
     pub tx_primary_certificate: mpsc::Sender<(BatchCertificate<N>, oneshot::Sender<Result<()>>)>,
-    pub tx_sync_bft_dag_at_bootup:
-        mpsc::Sender<(Vec<(BatchCertificate<N>, IndexSet<Field<N>>)>, Vec<BatchCertificate<N>>)>,
+    pub tx_sync_bft_dag_at_bootup: mpsc::Sender<Vec<BatchCertificate<N>>>,
     pub tx_sync_bft: mpsc::Sender<(BatchCertificate<N>, oneshot::Sender<Result<()>>)>,
 }
@@ -116,8 +115,7 @@ pub struct BFTReceiver<N: Network> {
     pub rx_last_election_certificate_ids: mpsc::Receiver<oneshot::Sender<IndexSet<Field<N>>>>,
     pub rx_primary_round: mpsc::Receiver<(u64, oneshot::Sender<bool>)>,
     pub rx_primary_certificate: mpsc::Receiver<(BatchCertificate<N>, oneshot::Sender<Result<()>>)>,
-    pub rx_sync_bft_dag_at_bootup:
-        mpsc::Receiver<(Vec<(BatchCertificate<N>, IndexSet<Field<N>>)>, Vec<BatchCertificate<N>>)>,
+    pub rx_sync_bft_dag_at_bootup: mpsc::Receiver<Vec<BatchCertificate<N>>>,
     pub rx_sync_bft: mpsc::Receiver<(BatchCertificate<N>, oneshot::Sender<Result<()>>)>,
 }
diff --git a/node/bft/src/lib.rs b/node/bft/src/lib.rs
index aa93a882af..3b569e87ae 100644
--- a/node/bft/src/lib.rs
+++ b/node/bft/src/lib.rs
@@ -14,6 +14,7 @@
 #![forbid(unsafe_code)]
 #![allow(clippy::type_complexity)]
+#![allow(clippy::unit_arg)]
 
 #[macro_use]
 extern crate async_trait;
diff --git a/node/bft/src/primary.rs b/node/bft/src/primary.rs
index 8e79cf7590..cabcdbb618 100644
--- a/node/bft/src/primary.rs
+++ b/node/bft/src/primary.rs
@@ -527,6 +527,13 @@ impl<N: Network> Primary<N> {
         if let Some((signed_round, signed_batch_id, signature)) =
             self.signed_proposals.read().get(&batch_author).copied()
         {
+            // If the signed round is ahead of the peer's batch round, then the validator is malicious.
+            if signed_round > batch_header.round() {
+                // Proceed to disconnect the validator.
+                self.gateway.disconnect(peer_ip);
+                bail!("Malicious peer - proposed a batch for a previous round ({})", batch_header.round());
+            }
+
             // If the round matches and the batch ID differs, then the validator is malicious.
             if signed_round == batch_header.round() && signed_batch_id != batch_header.batch_id() {
                 // Proceed to disconnect the validator.
@@ -551,15 +558,30 @@ impl<N: Network> Primary<N> {
         }
 
         // If the peer is ahead, use the batch header to sync up to the peer.
-        let transmissions = self.sync_with_batch_header_from_peer(peer_ip, &batch_header).await?;
+        let mut transmissions = self.sync_with_batch_header_from_peer(peer_ip, &batch_header).await?;
+
+        // Check that the transmission IDs match and are not fee transactions.
+        for (transmission_id, transmission) in transmissions.iter_mut() {
+            // If the transmission is not well-formed, then return early.
+ if let Err(err) = self.ledger.ensure_transmission_is_well_formed(*transmission_id, transmission) { + debug!("Batch propose from '{peer_ip}' contains an invalid transmission - {err}",); + return Ok(()); + } + } // Ensure the batch is for the current round. // This method must be called after fetching previous certificates (above), // and prior to checking the batch header (below). - self.ensure_is_signing_round(batch_round)?; + if let Err(e) = self.ensure_is_signing_round(batch_round) { + // If the primary is not signing for the peer's round, then return early. + debug!("{e} from '{peer_ip}'"); + return Ok(()); + } // Ensure the batch header from the peer is valid. - let missing_transmissions = self.storage.check_batch_header(&batch_header, transmissions)?; + let storage = self.storage.clone(); + let header = batch_header.clone(); + let missing_transmissions = spawn_blocking!(storage.check_batch_header(&header, transmissions))?; // Inserts the missing transmissions into the workers. self.insert_missing_transmissions_into_workers(peer_ip, missing_transmissions.into_iter())?; @@ -716,6 +738,8 @@ impl Primary { // Retrieve the batch certificate author. let author = certificate.author(); + // Retrieve the batch certificate round. + let certificate_round = certificate.round(); // Ensure the batch certificate is from an authorized validator. if !self.gateway.is_authorized_validator_ip(peer_ip) { @@ -731,28 +755,34 @@ impl Primary { // Store the certificate, after ensuring it is valid. self.sync_with_certificate_from_peer(peer_ip, certificate).await?; - // If there are enough certificates to reach quorum threshold for the current round, + // If there are enough certificates to reach quorum threshold for the certificate round, // then proceed to advance to the next round. - // Retrieve the current round. - let current_round = self.current_round(); // Retrieve the committee lookback. - let committee_lookback = self.ledger.get_committee_lookback_for_round(current_round)?; + let committee_lookback = self.ledger.get_committee_lookback_for_round(certificate_round)?; // Retrieve the certificates. - let certificates = self.storage.get_certificates_for_round(current_round); + let certificates = self.storage.get_certificates_for_round(certificate_round); // Construct a set over the authors. let authors = certificates.iter().map(BatchCertificate::author).collect(); // Check if the certificates have reached the quorum threshold. let is_quorum = committee_lookback.is_quorum_threshold_reached(&authors); - // Determine if we are currently proposing a round. + // Determine if we are currently proposing a round that is relevant. // Note: This is important, because while our peers have advanced, // they may not be proposing yet, and thus still able to sign our proposed batch. - let is_proposing = self.proposed_batch.read().is_some(); + let should_advance = match &*self.proposed_batch.read() { + // We advance if the proposal round is less than the current round that was just certified. + Some(proposal) => proposal.round() < certificate_round, + // If there's no proposal, we consider advancing. + None => true, + }; + + // Retrieve the current round. + let current_round = self.current_round(); // Determine whether to advance to the next round. - if is_quorum && !is_proposing { - // If we have reached the quorum threshold, then proceed to the next round. 
+ if is_quorum && should_advance && certificate_round >= current_round { + // If we have reached the quorum threshold and the round should advance, then proceed to the next round. self.try_increment_to_the_next_round(current_round + 1).await?; } Ok(()) @@ -965,6 +995,12 @@ impl Primary { debug!("Skipping batch proposal {}", "(node is syncing)".dimmed()); continue; } + // A best-effort attempt to skip the scheduled batch proposal if + // round progression already triggered one. + if self_.propose_lock.try_lock().is_err() { + trace!("Skipping batch proposal {}", "(node is already proposing)".dimmed()); + continue; + }; // If there is no proposed batch, attempt to propose a batch. // Note: Do NOT spawn a task around this function call. Proposing a batch is a critical path, // and only one batch needs be proposed at a time. @@ -1184,7 +1220,9 @@ impl Primary { // Note: Do not change the `Proposal` to use a HashMap. The ordering there is necessary for safety. let transmissions = transmissions.into_iter().collect::>(); // Store the certified batch. - self.storage.insert_certificate(certificate.clone(), transmissions)?; + let storage = self.storage.clone(); + let certificate_clone = certificate.clone(); + spawn_blocking!(storage.insert_certificate(certificate_clone, transmissions))?; debug!("Stored a batch certificate for round {}", certificate.round()); // If a BFT sender was provided, send the certificate to the BFT. if let Some(bft_sender) = self.bft_sender.get() { @@ -1263,7 +1301,9 @@ impl Primary { // Check if the certificate needs to be stored. if !self.storage.contains_certificate(certificate.id()) { // Store the batch certificate. - self.storage.insert_certificate(certificate.clone(), missing_transmissions)?; + let storage = self.storage.clone(); + let certificate_clone = certificate.clone(); + spawn_blocking!(storage.insert_certificate(certificate_clone, missing_transmissions))?; debug!("Stored a batch certificate for round {batch_round} from '{peer_ip}'"); // If a BFT sender was provided, send the round and certificate to the BFT. if let Some(bft_sender) = self.bft_sender.get() { @@ -1359,9 +1399,10 @@ impl Primary { return Ok(Default::default()); } - // Ensure this batch ID is new. + // Ensure this batch ID is new, otherwise return early. if self.storage.contains_batch(batch_header.batch_id()) { - bail!("Batch for round {} from peer has already been processed", batch_header.round()) + trace!("Batch for round {} from peer has already been processed", batch_header.round()); + return Ok(Default::default()); } // Retrieve the workers. diff --git a/node/bft/src/sync/mod.rs b/node/bft/src/sync/mod.rs index 517dbca5eb..b41b92b5e5 100644 --- a/node/bft/src/sync/mod.rs +++ b/node/bft/src/sync/mod.rs @@ -13,7 +13,8 @@ // limitations under the License. use crate::{ - helpers::{BFTSender, Pending, Storage, SyncReceiver}, + helpers::{fmt_id, BFTSender, Pending, Storage, SyncReceiver}, + spawn_blocking, Gateway, Transport, MAX_BATCH_DELAY_IN_MS, @@ -177,6 +178,7 @@ impl Sync { // Methods to manage storage. impl Sync { /// Syncs the storage with the ledger at bootup. + #[allow(clippy::unnecessary_to_owned)] pub async fn sync_storage_with_ledger_at_bootup(&self) -> Result<()> { // Retrieve the latest block in the ledger. let latest_block = self.ledger.latest_block(); @@ -205,33 +207,17 @@ impl Sync { // If the block authority is a subdag, then sync the batch certificates with the block. if let Authority::Quorum(subdag) = block.authority() { // Iterate over the certificates. 
- for certificate in subdag.values().flatten() { + for certificate in subdag.values().flatten().cloned() { // Sync the batch certificate with the block. - self.storage.sync_certificate_with_block(block, certificate); + let storage = self.storage.clone(); + let block = block.clone(); + let _ = spawn_blocking!(Ok(storage.sync_certificate_with_block(&block, &certificate))); } } } /* Sync the BFT DAG */ - // Retrieve the leader certificates. - let leader_certificates = blocks - .iter() - .flat_map(|block| { - match block.authority() { - // If the block authority is a beacon, then skip the block. - Authority::Beacon(_) => None, - // If the block authority is a subdag, then retrieve the certificates. - Authority::Quorum(subdag) => { - Some((subdag.leader_certificate().clone(), subdag.election_certificate_ids().clone())) - } - } - }) - .collect::>(); - if leader_certificates.is_empty() { - return Ok(()); - } - // Construct a list of the certificates. let certificates = blocks .iter() @@ -246,10 +232,10 @@ impl Sync { .flatten() .collect::>(); - // If a BFT sender was provided, send the certificate to the BFT. + // If a BFT sender was provided, send the certificates to the BFT. if let Some(bft_sender) = self.bft_sender.get() { // Await the callback to continue. - if let Err(e) = bft_sender.tx_sync_bft_dag_at_bootup.send((leader_certificates, certificates)).await { + if let Err(e) = bft_sender.tx_sync_bft_dag_at_bootup.send(certificates).await { bail!("Failed to update the BFT DAG from sync: {e}"); } } @@ -282,7 +268,10 @@ impl Sync { // Iterate over the certificates. for certificate in subdag.values().flatten() { // Sync the batch certificate with the block. - self.storage.sync_certificate_with_block(&block, certificate); + let storage = self.storage.clone(); + let block_clone = block.clone(); + let certificate_clone = certificate.clone(); + let _ = spawn_blocking!(Ok(storage.sync_certificate_with_block(&block_clone, &certificate_clone))); // If a BFT sender was provided, send the certificate to the BFT. if let Some(bft_sender) = self.bft_sender.get() { // Await the callback to continue. @@ -337,7 +326,7 @@ impl Sync { if self.pending.insert(certificate_id, peer_ip, Some(callback_sender)) { // Send the certificate request to the peer. if self.gateway.send(peer_ip, Event::CertificateRequest(certificate_id.into())).await.is_none() { - bail!("Unable to fetch batch certificate {certificate_id} - failed to send request") + bail!("Unable to fetch certificate {} - failed to send request", fmt_id(certificate_id)) } } // Wait for the certificate to be fetched. @@ -345,7 +334,7 @@ impl Sync { // If the certificate was fetched, return it. Ok(result) => Ok(result?), // If the certificate was not fetched, return an error. - Err(e) => bail!("Unable to fetch batch certificate {certificate_id} - (timeout) {e}"), + Err(e) => bail!("Unable to fetch certificate {} - (timeout) {e}", fmt_id(certificate_id)), } } diff --git a/node/bft/src/worker.rs b/node/bft/src/worker.rs index b07cb8a8f1..49ac94f67f 100644 --- a/node/bft/src/worker.rs +++ b/node/bft/src/worker.rs @@ -399,8 +399,8 @@ impl Worker { let exists = self.pending.get(transmission_id).unwrap_or_default().contains(&peer_ip); // If the peer IP exists, finish the pending request. if exists { - // Ensure the transmission ID matches the transmission. - match self.ledger.ensure_transmission_id_matches(transmission_id, &mut transmission) { + // Ensure the transmission is not a fee and matches the transmission ID. 
+ match self.ledger.ensure_transmission_is_well_formed(transmission_id, &mut transmission) { Ok(()) => { // Remove the transmission ID from the pending queue. self.pending.remove(transmission_id, Some(transmission)); @@ -487,7 +487,7 @@ mod tests { fn get_committee_lookback_for_round(&self, round: u64) -> Result>; fn contains_certificate(&self, certificate_id: &Field) -> Result; fn contains_transmission(&self, transmission_id: &TransmissionID) -> Result; - fn ensure_transmission_id_matches( + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, transmission: &mut Transmission, @@ -550,6 +550,7 @@ mod tests { let rng = &mut TestRng::default(); // Sample a committee. let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let mut gateway = MockGateway::default(); gateway.expect_send().returning(|_, _| { @@ -558,7 +559,8 @@ mod tests { }); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); - mock_ledger.expect_ensure_transmission_id_matches().returning(|_, _| Ok(())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); + mock_ledger.expect_ensure_transmission_is_well_formed().returning(|_, _| Ok(())); let ledger: Arc> = Arc::new(mock_ledger); // Initialize the storage. let storage = Storage::::new(ledger.clone(), Arc::new(BFTMemoryService::new()), 1); diff --git a/node/router/src/inbound.rs b/node/router/src/inbound.rs index 1b0cf0da22..e47cf39665 100644 --- a/node/router/src/inbound.rs +++ b/node/router/src/inbound.rs @@ -207,7 +207,8 @@ pub trait Inbound: Reading + Outbound { let seen_before = self.router().cache.insert_inbound_solution(peer_ip, message.solution_id).is_some(); // Determine whether to propagate the solution. if seen_before { - bail!("Skipping 'UnconfirmedSolution' from '{peer_ip}'") + trace!("Skipping 'UnconfirmedSolution' from '{peer_ip}'"); + return Ok(()); } // Perform the deferred non-blocking deserialization of the solution. let solution = match message.solution.deserialize().await { @@ -232,7 +233,8 @@ pub trait Inbound: Reading + Outbound { self.router().cache.insert_inbound_transaction(peer_ip, message.transaction_id).is_some(); // Determine whether to propagate the transaction. if seen_before { - bail!("Skipping 'UnconfirmedTransaction' from '{peer_ip}'") + trace!("Skipping 'UnconfirmedTransaction' from '{peer_ip}'"); + return Ok(()); } // Perform the deferred non-blocking deserialization of the transaction. let transaction = match message.transaction.deserialize().await { diff --git a/node/src/traits.rs b/node/src/traits.rs index 12947ca3f2..6fd41686c6 100644 --- a/node/src/traits.rs +++ b/node/src/traits.rs @@ -17,6 +17,8 @@ use snarkvm::prelude::{Address, Network, PrivateKey, ViewKey}; use once_cell::sync::OnceCell; use std::{ + future::Future, + io, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -53,15 +55,40 @@ pub trait NodeInterface: Routing { /// Handles OS signals for the node to intercept and perform a clean shutdown. /// The optional `shutdown_flag` flag can be used to cleanly terminate the syncing process. - /// Note: Only Ctrl-C is supported; it should work on both Unix-family systems and Windows. fn handle_signals(shutdown_flag: Arc) -> Arc> { // In order for the signal handler to be started as early as possible, a reference to the node needs // to be passed to it at a later time. 
         let node: Arc<OnceCell<Self>> = Default::default();
+        #[cfg(target_family = "unix")]
+        fn signal_listener() -> impl Future<Output = io::Result<()>> {
+            use tokio::signal::unix::{signal, SignalKind};
+
+            // Handle SIGINT, SIGTERM, SIGQUIT, and SIGHUP.
+            let mut s_int = signal(SignalKind::interrupt()).unwrap();
+            let mut s_term = signal(SignalKind::terminate()).unwrap();
+            let mut s_quit = signal(SignalKind::quit()).unwrap();
+            let mut s_hup = signal(SignalKind::hangup()).unwrap();
+
+            // Return when any of the signals above is received.
+            async move {
+                tokio::select!(
+                    _ = s_int.recv() => (),
+                    _ = s_term.recv() => (),
+                    _ = s_quit.recv() => (),
+                    _ = s_hup.recv() => (),
+                );
+                Ok(())
+            }
+        }
+        #[cfg(not(target_family = "unix"))]
+        fn signal_listener() -> impl Future<Output = io::Result<()>> {
+            tokio::signal::ctrl_c()
+        }
+
         let node_clone = node.clone();
         tokio::task::spawn(async move {
-            match tokio::signal::ctrl_c().await {
+            match signal_listener().await {
                 Ok(()) => {
                     match node_clone.get() {
                         // If the node is already initialized, then shut it down.
diff --git a/node/sync/src/block_sync.rs b/node/sync/src/block_sync.rs
index aadb6873a8..497d66adb8 100644
--- a/node/sync/src/block_sync.rs
+++ b/node/sync/src/block_sync.rs
@@ -36,8 +36,11 @@ use std::{
     time::Instant,
 };
 
+#[cfg(not(test))]
+pub const REDUNDANCY_FACTOR: usize = 1;
+#[cfg(test)]
 pub const REDUNDANCY_FACTOR: usize = 3;
-const EXTRA_REDUNDANCY_FACTOR: usize = REDUNDANCY_FACTOR * 2;
+const EXTRA_REDUNDANCY_FACTOR: usize = REDUNDANCY_FACTOR * 3;
 const NUM_SYNC_CANDIDATE_PEERS: usize = REDUNDANCY_FACTOR * 5;
 
 const BLOCK_REQUEST_TIMEOUT_IN_SECS: u64 = 60; // 60 seconds
diff --git a/snarkos/main.rs b/snarkos/main.rs
index 64cef2f182..61a86949b8 100644
--- a/snarkos/main.rs
+++ b/snarkos/main.rs
@@ -17,10 +17,10 @@ use snarkos_cli::{commands::CLI, helpers::Updater};
 use clap::Parser;
 use std::process::exit;
 
-#[cfg(feature = "jemalloc")]
+#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
 use tikv_jemallocator::Jemalloc;
 
-#[cfg(feature = "jemalloc")]
+#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
 #[global_allocator]
 static GLOBAL: Jemalloc = Jemalloc;
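
Note on the node/src/traits.rs change above: the single `tokio::signal::ctrl_c()` listener is replaced by a Unix-aware listener that also reacts to SIGTERM, SIGQUIT, and SIGHUP. The sketch below exercises the same pattern in isolation; it is not part of this patch. It assumes the `tokio` crate with the "signal", "macros", and "rt-multi-thread" features, and the helper name `wait_for_shutdown_signal` plus the `main` harness are illustrative only.

use std::io;

#[cfg(target_family = "unix")]
async fn wait_for_shutdown_signal() -> io::Result<()> {
    use tokio::signal::unix::{signal, SignalKind};
    // Register the same four signals handled in the patched `handle_signals`.
    let mut s_int = signal(SignalKind::interrupt())?;
    let mut s_term = signal(SignalKind::terminate())?;
    let mut s_quit = signal(SignalKind::quit())?;
    let mut s_hup = signal(SignalKind::hangup())?;
    // Resolve as soon as any one of them fires.
    tokio::select! {
        _ = s_int.recv() => (),
        _ = s_term.recv() => (),
        _ = s_quit.recv() => (),
        _ = s_hup.recv() => (),
    }
    Ok(())
}

#[cfg(not(target_family = "unix"))]
async fn wait_for_shutdown_signal() -> io::Result<()> {
    // Non-Unix targets (e.g. Windows) fall back to Ctrl-C only.
    tokio::signal::ctrl_c().await
}

#[tokio::main]
async fn main() -> io::Result<()> {
    println!("waiting for SIGINT/SIGTERM/SIGQUIT/SIGHUP (Ctrl-C on non-Unix)...");
    wait_for_shutdown_signal().await?;
    println!("received a shutdown signal; exiting cleanly");
    Ok(())
}

Sending SIGTERM (as a process manager such as systemd does on stop) now takes the same clean-shutdown path as Ctrl-C, which is the motivation for the change in the diff.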