diff --git a/.circleci/config.yml b/.circleci/config.yml index 47b63f2eb4..21641255cc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,6 @@ commands: default: snarkos-stable-cache steps: - run: set -e - - setup_remote_docker - run: name: Prepare environment and install dependencies command: | @@ -160,8 +159,8 @@ commands: jobs: integration: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - run_serial_long: workspace_member: .integration @@ -169,8 +168,8 @@ jobs: snarkos: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - run_serial: workspace_member: . @@ -178,8 +177,8 @@ jobs: account: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: account @@ -187,8 +186,8 @@ jobs: cli: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: cli @@ -196,8 +195,8 @@ jobs: display: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: display @@ -205,8 +204,8 @@ jobs: node: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - run_serial: workspace_member: node @@ -214,8 +213,8 @@ jobs: node-bft: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - run_serial: workspace_member: node/bft @@ -223,8 +222,8 @@ jobs: node-bft-events: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - run_serial: workspace_member: node/bft/events @@ -232,8 +231,8 @@ jobs: node-bft-ledger-service: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/bft/ledger-service @@ -241,8 +240,8 @@ jobs: node-bft-storage-service: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/bft/storage-service @@ -250,8 +249,8 @@ jobs: node-cdn: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/cdn @@ -259,8 +258,8 @@ jobs: node-consensus: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change 
the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - run_serial: workspace_member: node/consensus @@ -268,8 +267,8 @@ jobs: node-rest: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/rest @@ -277,8 +276,8 @@ jobs: node-router: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/router @@ -286,8 +285,8 @@ jobs: node-router-messages: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/router/messages @@ -295,8 +294,8 @@ jobs: node-sync: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/sync @@ -304,8 +303,8 @@ jobs: node-sync-communication-service: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/sync/communication-service @@ -313,8 +312,8 @@ jobs: node-sync-locators: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/sync/locators @@ -322,8 +321,8 @@ jobs: node-tcp: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - run_serial: workspace_member: node/tcp @@ -331,8 +330,8 @@ jobs: check-fmt: docker: - - image: cimg/rust:1.74.1 - resource_class: xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/xlarge steps: - checkout - install_rust_nightly @@ -347,8 +346,8 @@ jobs: check-clippy: docker: - - image: cimg/rust:1.74.1 - resource_class: 2xlarge + - image: cimg/rust:1.76.0 # Attention - Change the MSRV in Cargo.toml and rust-toolchain as well + resource_class: anf/2xlarge steps: - checkout - setup_environment: @@ -420,6 +419,6 @@ workflows: filters: branches: only: - - testnet3 + - mainnet jobs: - - integration + - integration \ No newline at end of file diff --git a/.devnet/.analytics/analytics.js b/.devnet/.analytics/analytics.js index 32fc0e0abd..601be7a19f 100644 --- a/.devnet/.analytics/analytics.js +++ b/.devnet/.analytics/analytics.js @@ -6,6 +6,44 @@ const yargs = require('yargs'); const dimStart = "\x1b[2m"; const dimEnd = "\x1b[0m"; +// Function to get the IP address of a given AWS node +async function getIPAddress(awsNodeName) { + // Read the ~/.ssh/config file + const sshConfigFile = fs.readFileSync(`${process.env.HOME}/.ssh/config`, 'utf8'); + + // Use regular expressions to extract the associated IP address + const regex = new RegExp(`Host\\s+${awsNodeName}[\\s\\S]*?HostName\\s+(\\S+)`); + const match = sshConfigFile.match(regex); + + if (match && 
match[1]) { + return match[1]; + } else { + console.error(`No IP address found for ${awsNodeName} in ~/.ssh/config`); + } +} + +// Function to get the count of AWS nodes based on the naming convention aws-nXX in the SSH config file +async function getAWSNodeCount() { + // Read the ~/.ssh/config file + const sshConfigFile = fs.readFileSync(`${process.env.HOME}/.ssh/config`, 'utf8'); + + // Regular expression to match all aws-nXX formats + const regex = /Host\s+(aws-n\d+)/g; + let match; + let highestNumber = -1; + + // Iterate over all matches and find the highest number + while ((match = regex.exec(sshConfigFile)) !== null) { + const nodeNumber = parseInt(match[1].replace('aws-n', ''), 10); + if (nodeNumber > highestNumber) { + highestNumber = nodeNumber; + } + } + + // Return the count of nodes, adding 1 because it starts from 0 + return highestNumber >= 0 ? highestNumber + 1 : 0; +} + // Function to fetch block data async function fetchBlockData(baseUrl, height) { try { @@ -77,10 +115,32 @@ async function calculateRoundsInBlocks(baseUrl, latestHeight) { } } +async function checkBlockHash(blockHeight) { + const numNodes = await getAWSNodeCount(); + console.log(`Detected ${numNodes} AWS nodes... \n`); + + for (let i = 0; i < numNodes; i++) { + // Define the AWS node name to search for (e.g., aws-n1) + const awsNodeName = `aws-n${i}`; + // Get the IP address of the AWS node + const ipAddress = await getIPAddress(awsNodeName); + // Define the base URL for the node + const baseUrl = `http://${ipAddress}:3030/mainnet/block`; + + // Fetch the block data + const blockData = await fetchBlockData(baseUrl, blockHeight); + if (blockData && blockData.block_hash) { + console.log(`${awsNodeName} - Block ${blockHeight} - ${blockData.block_hash}`); + } else { + console.log(`${awsNodeName} - Block ${blockHeight} - No block hash found`); + } + } +} + // Main function to fetch block metrics -async function fetchBlockMetrics(baseUrl, metricType) { +async function fetchBlockMetrics(metricType, optionalBlockHeight) { // Function to get the latest block height - async function getLatestBlockHeight() { + async function getLatestBlockHeight(baseUrl) { try { const response = await axios.get(`${baseUrl}/height/latest`); const latestHeight = response.data; @@ -92,7 +152,17 @@ async function fetchBlockMetrics(baseUrl, metricType) { } } - const latestHeight = await getLatestBlockHeight(); + // Define the AWS node name to search for (e.g., aws-n1) + const awsNodeName = 'aws-n1'; + // Get the IP address of the AWS node + const ipAddress = await getIPAddress(awsNodeName); + // Define the base URL for the node. + const baseUrl = `http://${ipAddress}:3030/mainnet/block`; + + console.log(`${dimStart}IP Address: ${ipAddress}${dimEnd}`); + console.log(`${dimStart}Base URL: ${baseUrl}${dimEnd}`); + + const latestHeight = await getLatestBlockHeight(baseUrl); if (latestHeight === null) { console.error('Unable to fetch latest block height, try again...'); return; @@ -104,6 +174,8 @@ async function fetchBlockMetrics(baseUrl, metricType) { calculateAverageBlockTime(baseUrl, latestHeight); } else if (metricType === 'roundsInBlocks') { calculateRoundsInBlocks(baseUrl, latestHeight); + } else if (metricType === 'checkBlockHash' && optionalBlockHeight) { + checkBlockHash(optionalBlockHeight); } else { console.error('Invalid metric type. 
Supported types: "averageBlockTime" or "roundsInBlocks".'); } @@ -115,35 +187,27 @@ async function main() { .options({ 'metric-type': { alias: 'm', - describe: 'Metric type to fetch (averageBlockTime or roundsInBlocks)', + describe: 'Metric type to fetch (averageBlockTime, roundsInBlocks, or checkBlockHash)', demandOption: true, - choices: ['averageBlockTime', 'roundsInBlocks'], + choices: ['averageBlockTime', 'roundsInBlocks', 'checkBlockHash'], + }, + 'block-height': { + alias: 'b', + describe: 'Block height to examine for checkBlockHash metric', + type: 'number', }, }) + .check((argv) => { + // Check if metric-type is checkBlockHash and block-height is provided + if (argv['metric-type'] === 'checkBlockHash' && (isNaN(argv['block-height']) || argv['block-height'] == null)) { + throw new Error('Block height is required when metric-type is checkBlockHash'); + } + return true; // Indicate that the arguments passed the check + }) .argv; - // Read the ~/.ssh/config file - const sshConfigFile = fs.readFileSync(`${process.env.HOME}/.ssh/config`, 'utf8'); - - // Define the AWS node name to search for (e.g., aws-n1) - const awsNodeName = 'aws-n1'; - - // Use regular expressions to extract the IP address associated with aws-n0 - const regex = new RegExp(`Host\\s+${awsNodeName}[\\s\\S]*?HostName\\s+(\\S+)`); - const match = sshConfigFile.match(regex); - - if (match && match[1]) { - const ipAddress = match[1]; - const baseUrl = `http://${ipAddress}:3033/testnet3/block`; - - console.log(`${dimStart}IP Address: ${ipAddress}${dimEnd}`); - console.log(`${dimStart}Base URL: ${baseUrl}${dimEnd}`); - - // Fetch and output the specified block metric - fetchBlockMetrics(baseUrl, argv['metric-type']); - } else { - console.error(`No IP address found for ${awsNodeName} in ~/.ssh/config`); - } + // Fetch and output the specified block metric + fetchBlockMetrics(argv['metric-type'], argv['block-height']); } // Run the main function diff --git a/.devnet/.analytics/package.json b/.devnet/.analytics/package.json index ebb359a766..267cfc4826 100644 --- a/.devnet/.analytics/package.json +++ b/.devnet/.analytics/package.json @@ -4,8 +4,6 @@ "description": "", "main": "analytics.js", "scripts": { - "averageBlockTime": "node analytics.js --metric-type averageBlockTime", - "roundsInBlocks": "node analytics.js --metric-type roundsInBlocks", "test": "echo \"Error: no test specified\" && exit 1" }, "author": "The Aleo Team", diff --git a/.devnet/README.md b/.devnet/README.md index 559e8984de..57610c5ef3 100644 --- a/.devnet/README.md +++ b/.devnet/README.md @@ -6,11 +6,11 @@ Start by creating EC2 instances in the AWS console. - Ubuntu 22.04 LTS (not Amazon Linux) - Security Group - Inbound Policy - SSH - Port 22 - 0.0.0.0/0 (or your IP) - - Custom TCP - Port 3033 - 0.0.0.0/0 (or your IP) - - Custom TCP - Port 4133 - 0.0.0.0/0 + - Custom TCP - Port 3030 - 0.0.0.0/0 (or your IP) + - Custom TCP - Port 4130 - 0.0.0.0/0 - Custom TCP - Port 5000 - 0.0.0.0/0 -Be sure the give the EC2 instances a name tag, i.e. `testnet3`. +Be sure the give the EC2 instances a name tag, i.e. `devnet`. Make sure you set the correct SSH `.pem` and have the `.pem` in your `~/.ssh` directory. 
diff --git a/.devnet/analytics.sh b/.devnet/analytics.sh index e2671f552b..9284f3a7e7 100755 --- a/.devnet/analytics.sh +++ b/.devnet/analytics.sh @@ -16,16 +16,30 @@ fi # Prompt the user to select a metric type PS3="Select a metric type: " -options=("Average Block Time" "Rounds in Blocks" "Quit") +options=("Average Block Time" "Rounds in Blocks" "Check Block Hash" "Quit") select opt in "${options[@]}" do case $opt in "Average Block Time") - npm run averageBlockTime + echo "" + node analytics.js --metric-type averageBlockTime break ;; "Rounds in Blocks") - npm run roundsInBlocks + echo "" + node analytics.js --metric-type roundsInBlocks + break + ;; + "Check Block Hash") + echo "You selected 'Check Block Hash'. Please enter the block height:" + read blockHeight + echo "" + # Validate input is an integer + if ! [[ "$blockHeight" =~ ^[0-9]+$ ]]; then + echo "Error: Block height must be a positive integer." + exit 1 + fi + node analytics.js --metric-type checkBlockHash --block-height "$blockHeight" break ;; "Quit") diff --git a/.devnet/clean.sh b/.devnet/clean.sh index c3807d8e91..d06f8d5962 100755 --- a/.devnet/clean.sh +++ b/.devnet/clean.sh @@ -38,7 +38,7 @@ EOF } # Loop through aws-n nodes and terminate tmux sessions in parallel -for NODE_ID in $(seq 0 $NUM_INSTANCES); do +for NODE_ID in $(seq 0 $(($NUM_INSTANCES - 1))); do terminate_tmux_session $NODE_ID & done diff --git a/.devnet/config.sh b/.devnet/config.sh index 130e21740f..8c791ed6b2 100755 --- a/.devnet/config.sh +++ b/.devnet/config.sh @@ -1,12 +1,12 @@ #!/bin/bash # Read the EC2 instance name from the user -read -p "Enter the EC2 instance name to filter by (e.g. Name) (default: testnet3): " INSTANCE_NAME -INSTANCE_NAME="${INSTANCE_NAME:-testnet3}" +read -p "Enter the EC2 instance name to filter by (e.g. Name) (default: devnet): " INSTANCE_NAME +INSTANCE_NAME="${INSTANCE_NAME:-devnet}" # Read the PEM file path from the user or use the default in ~/.ssh -read -p "Enter the PEM file path (default: ~/.ssh/s3-testnet3.pem): " PEM_FILE -PEM_FILE="${PEM_FILE:-~/.ssh/s3-testnet3.pem}" +read -p "Enter the PEM file path (default: ~/.ssh/s3-devnet.pem): " PEM_FILE +PEM_FILE="${PEM_FILE:-~/.ssh/s3-devnet.pem}" # Use the AWS CLI to describe running EC2 instances, filter by the provided name, and store the JSON output in a variable instance_info=$(aws ec2 describe-instances \ diff --git a/.devnet/fetch-logs.sh b/.devnet/fetch-logs.sh new file mode 100755 index 0000000000..aa518e584f --- /dev/null +++ b/.devnet/fetch-logs.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Determine the number of AWS EC2 instances by checking ~/.ssh/config +NODE_ID=0 +while [ -n "$(grep "aws-n${NODE_ID}" ~/.ssh/config)" ]; do + NODE_ID=$((NODE_ID + 1)) +done + +# Read the number of AWS EC2 instances to query from the user +read -p "Enter the number of AWS EC2 instances to query (default: $NODE_ID): " NUM_INSTANCES +NUM_INSTANCES="${NUM_INSTANCES:-$NODE_ID}" + +echo "Using $NUM_INSTANCES AWS EC2 instances for querying." + +# Define the directory where logs will be saved +log_directory="$HOME/snarkos_logs" + +# Create the log directory if it doesn't already exist +mkdir -p "$log_directory" + +# Loop over the instances, from aws-n0 to aws-n(NUM_INSTANCES - 1) +for i in $(seq 0 $(($NUM_INSTANCES - 1))); do + echo "Connecting to aws-n$i..." + # Use sftp to connect, execute commands, and exit + sftp aws-n$i << EOF +cd /tmp +get snarkos.log "$log_directory/snarkos-$i.log" +EOF + echo "Downloaded snarkos.log from aws-n$i as snarkos-$i.log into $log_directory" +done + +echo "All files have been downloaded to $log_directory." 
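The loop-bound fix that first appears in clean.sh above is repeated in install.sh, reinstall.sh, start.sh, and stop.sh below. Nodes are numbered `aws-n0` through `aws-n$((NUM_INSTANCES - 1))`, and `seq`'s upper bound is inclusive, so the old loops ran one extra iteration against a nonexistent node. A quick illustration, assuming a three-node devnet:

```bash
NUM_INSTANCES=3                   # devnet hosts: aws-n0, aws-n1, aws-n2
seq 0 $NUM_INSTANCES              # prints 0 1 2 3 -> also targets a missing aws-n3
seq 0 $(($NUM_INSTANCES - 1))     # prints 0 1 2   -> matches the actual hosts
```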
diff --git a/.devnet/install.sh b/.devnet/install.sh index a903c83072..82054f11f1 100755 --- a/.devnet/install.sh +++ b/.devnet/install.sh @@ -1,8 +1,8 @@ #!/bin/bash -# Prompt the user for the branch to install (default is "testnet3") -read -p "Enter the branch to install (default: testnet3): " BRANCH -BRANCH=${BRANCH:-testnet3} +# Prompt the user for the branch to install (default is "mainnet") +read -p "Enter the branch to install (default: mainnet): " BRANCH +BRANCH=${BRANCH:-mainnet} # Determine the number of AWS EC2 instances by checking ~/.ssh/config NODE_ID=0 @@ -50,7 +50,7 @@ EOF } # Loop through aws-n nodes and run installations in parallel -for NODE_ID in $(seq 0 $NUM_INSTANCES); do +for NODE_ID in $(seq 0 $(($NUM_INSTANCES - 1))); do run_installation $NODE_ID $BRANCH & done diff --git a/.devnet/reinstall.sh b/.devnet/reinstall.sh index 3db5b4f533..624fa7ab13 100755 --- a/.devnet/reinstall.sh +++ b/.devnet/reinstall.sh @@ -1,8 +1,8 @@ #!/bin/bash -# Prompt the user for the branch to install (default is "testnet3") -read -p "Enter the branch to install (default: testnet3): " BRANCH -BRANCH=${BRANCH:-testnet3} +# Prompt the user for the branch to install (default is "mainnet") +read -p "Enter the branch to install (default: mainnet): " BRANCH +BRANCH=${BRANCH:-mainnet} # Determine the number of AWS EC2 instances by checking ~/.ssh/config NODE_ID=0 @@ -54,7 +54,7 @@ EOF } # Loop through aws-n nodes and run installations in parallel -for NODE_ID in $(seq 0 $NUM_INSTANCES); do +for NODE_ID in $(seq 0 $(($NUM_INSTANCES - 1))); do run_installation $NODE_ID $BRANCH & done diff --git a/.devnet/start.sh b/.devnet/start.sh index 755cacf31b..77294f080c 100755 --- a/.devnet/start.sh +++ b/.devnet/start.sh @@ -37,7 +37,7 @@ start_snarkos_in_tmux() { tmux new-session -d -s snarkos-session # Send the snarkOS start command to the tmux session with the NODE_ID - tmux send-keys -t "snarkos-session" "snarkos start --nodisplay --bft 0.0.0.0:5000 --rest 0.0.0.0:3033 --peers $NODE_IP:4133 --validators $NODE_IP:5000 --verbosity $VERBOSITY --dev $NODE_ID --dev-num-validators $NUM_INSTANCES --validator --metrics" C-m + tmux send-keys -t "snarkos-session" "snarkos start --nodisplay --bft 0.0.0.0:5000 --rest 0.0.0.0:3030 --allow-external-peers --peers $NODE_IP:4130 --validators $NODE_IP:5000 --rest-rps 1000 --verbosity $VERBOSITY --dev $NODE_ID --dev-num-validators $NUM_INSTANCES --validator --metrics" C-m exit # Exit root user EOF @@ -51,7 +51,7 @@ EOF } # Loop through aws-n nodes and start snarkOS in tmux sessions in parallel -for NODE_ID in $(seq 0 $NUM_INSTANCES); do +for NODE_ID in $(seq 0 $(($NUM_INSTANCES - 1))); do start_snarkos_in_tmux $NODE_ID "$NODE_0_IP" & done diff --git a/.devnet/start_sync_test.sh b/.devnet/start_sync_test.sh new file mode 100755 index 0000000000..6ca5fbb77a --- /dev/null +++ b/.devnet/start_sync_test.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# Determine the number of AWS EC2 instances by checking ~/.ssh/config +NODE_ID=0 +while [ -n "$(grep "aws-n${NODE_ID}" ~/.ssh/config)" ]; do + NODE_ID=$((NODE_ID + 1)) +done + +# Read the number of AWS EC2 instances to query from the user +read -p "Enter the number of AWS EC2 instances to query (default: $NODE_ID): " NUM_INSTANCES +NUM_INSTANCES="${NUM_INSTANCES:-$NODE_ID}" + +echo "Using $NUM_INSTANCES AWS EC2 instances for querying." + +# Read the verbosity level from the user (default: 1) +read -p "Enter the verbosity level (default: 1): " VERBOSITY +VERBOSITY="${VERBOSITY:-1}" + +echo "Using verbosity level $VERBOSITY." 
+ +# Get the IP address of NODE 0 from the SSH config for aws-n0 +NODE_0_IP=$(awk '/Host aws-n0/{f=1} f&&/HostName/{print $2; exit}' ~/.ssh/config) + +# Define a function to start snarkOS in a tmux session on a node +start_snarkos_in_tmux() { + local NODE_ID=$1 + local NODE_IP=$2 + + # SSH into the node and start snarkOS in a new tmux session + ssh -o StrictHostKeyChecking=no aws-n$NODE_ID << EOF + # Commands to run on the remote instance + sudo -i # Switch to root user + WORKSPACE=~/snarkOS + cd \$WORKSPACE + + # Start snarkOS within a new tmux session named "snarkos-session" + tmux new-session -d -s snarkos-session + + # Send the snarkOS start command to the tmux session with the NODE_ID, honoring the verbosity level read above + tmux send-keys -t "snarkos-session" "snarkos start --client --nocdn --nodisplay --rest 0.0.0.0:3030 --node 0.0.0.0:4130 --verbosity $VERBOSITY --metrics --logfile /tmp/snarkos-syncing-range-3.log --peers 167.71.249.65:4130,157.245.218.195:4130,167.71.249.55:4130" C-m + + exit # Exit root user +EOF + + # Check the exit status of the SSH command + if [ $? -eq 0 ]; then + echo "snarkOS started successfully in a tmux session on aws-n$NODE_ID." + else + echo "Failed to start snarkOS in a tmux session on aws-n$NODE_ID." + fi +} + +# Loop through aws-n nodes and start snarkOS in tmux sessions in parallel +for NODE_ID in $(seq 0 $(($NUM_INSTANCES - 1))); do + start_snarkos_in_tmux $NODE_ID "$NODE_0_IP" & +done + +# Wait for all background jobs to finish +wait diff --git a/.devnet/stop.sh b/.devnet/stop.sh index 494e62776d..8899eafd24 100755 --- a/.devnet/stop.sh +++ b/.devnet/stop.sh @@ -34,7 +34,7 @@ EOF } # Loop through aws-n nodes and terminate tmux sessions in parallel -for NODE_ID in $(seq 0 $NUM_INSTANCES); do +for NODE_ID in $(seq 0 $(($NUM_INSTANCES - 1))); do terminate_tmux_session $NODE_ID & done diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8ab1084ea6..85dabf229b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,5 +5,5 @@ updates: schedule: interval: daily time: "10:00" - target-branch: "testnet3" + target-branch: "mainnet" open-pull-requests-limit: 10 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f0c9ccbdba..03ca1330c3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -35,16 +35,16 @@ jobs: mkdir tempdir mv target/release/snarkos tempdir cd tempdir - zip -r aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-unknown-linux-gnu.zip snarkos + zip -r aleo-${{ steps.get_version.outputs.version }}-x86_64-unknown-linux-gnu.zip snarkos cd .. - mv tempdir/aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-unknown-linux-gnu.zip . + mv tempdir/aleo-${{ steps.get_version.outputs.version }}-x86_64-unknown-linux-gnu.zip . - name: Release uses: softprops/action-gh-release@v1 if: startsWith(github.ref, 'refs/tags/') with: files: | - aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-unknown-linux-gnu.zip + aleo-${{ steps.get_version.outputs.version }}-x86_64-unknown-linux-gnu.zip env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -75,16 +75,16 @@ jobs: mkdir tempdir mv target/release/snarkos tempdir cd tempdir - zip -r aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-apple-darwin.zip snarkos + zip -r aleo-${{ steps.get_version.outputs.version }}-x86_64-apple-darwin.zip snarkos cd .. - mv tempdir/aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-apple-darwin.zip . + mv tempdir/aleo-${{ steps.get_version.outputs.version }}-x86_64-apple-darwin.zip . 
- name: Release uses: softprops/action-gh-release@v1 if: startsWith(github.ref, 'refs/tags/') with: files: | - aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-apple-darwin.zip + aleo-${{ steps.get_version.outputs.version }}-x86_64-apple-darwin.zip env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -122,13 +122,13 @@ jobs: - name: Zip run: | - Compress-Archive target/release/snarkos.exe aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-pc-windows-msvc.zip + Compress-Archive target/release/snarkos.exe aleo-${{ steps.get_version.outputs.version }}-x86_64-pc-windows-msvc.zip - name: Release uses: softprops/action-gh-release@v1 if: startsWith(github.ref, 'refs/tags/') with: files: | - aleo-testnet1-${{ steps.get_version.outputs.version }}-x86_64-pc-windows-msvc.zip + aleo-${{ steps.get_version.outputs.version }}-x86_64-pc-windows-msvc.zip env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.integration/src/lib.rs b/.integration/src/lib.rs index cddebc32cb..02e18ed396 100644 --- a/.integration/src/lib.rs +++ b/.integration/src/lib.rs @@ -23,13 +23,13 @@ mod tests { store::helpers::memory::ConsensusMemory, FromBytes, Ledger, + MainnetV0, Network, - Testnet3, }; use tracing_test::traced_test; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; const TEST_BASE_URL: &str = "https://testnet3.blocks.aleo.org/phase3"; diff --git a/Cargo.lock b/Cargo.lock index 07b3b46fe4..05f3cd55d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,46 +17,11 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "ghash", - "subtle", -] - [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -66,9 +31,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -115,7 +80,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72f2a841f04c2eaeb5a95312e5201a9e4b7c95b64ca99870d6bd2e2376df540a" dependencies = [ "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -126,7 +91,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6118baab6285accf088b31d5ea5029c37bbf9d98e62b4d8720a0a5a66bc2e427" 
dependencies = [ "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -141,9 +106,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -171,9 +136,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.8" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628a8f9bd1e24b4e0db2b4bc2d000b001e7dd032d54afa60a68836aeec5aa54a" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -185,9 +150,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -219,9 +184,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "arrayref" @@ -237,13 +202,13 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-recursion" -version = "1.0.5" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -264,41 +229,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "axum" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1236b4b292f6c4d6dc34604bb5120d85c3fe1d1aa596bd5cc52ca054d13e7b9e" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", "axum-core", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.1.0", + "hyper 1.2.0", "hyper-util", "itoa", "matchit", @@ -311,7 +276,7 @@ dependencies = [ 
"serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.1", "tokio", "tower", "tower-layer", @@ -328,13 +293,13 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", "tracing", @@ -342,16 +307,16 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "895ff42f72016617773af68fb90da2a9677d89c62338ec09162d4909d86fdd8f" +checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733" dependencies = [ "axum", "axum-core", "bytes", "futures-util", "headers", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "mime", @@ -361,13 +326,14 @@ dependencies = [ "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -384,6 +350,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "bech32" version = "0.9.1" @@ -413,11 +385,11 @@ dependencies = [ "peeking_take_while", "prettyplease", "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -443,9 +415,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "blake2" @@ -478,18 +450,18 @@ dependencies = [ [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "tinyvec", ] [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byteorder" @@ -499,9 +471,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bzip2-sys" @@ -522,9 +494,9 @@ checksum = 
"df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41" dependencies = [ "jobserver", "libc", @@ -545,40 +517,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "chacha20" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "chacha20poly1305" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" -dependencies = [ - "aead", - "chacha20", - "cipher", - "poly1305", - "zeroize", -] - [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -590,17 +538,6 @@ dependencies = [ "envmnt", ] -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", - "zeroize", -] - [[package]] name = "clang-sys" version = "1.7.0" @@ -614,9 +551,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -624,9 +561,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -636,21 +573,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "colorchoice" @@ -714,9 +651,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -752,7 +689,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "crossterm_winapi", "libc", "mio", @@ -784,39 +721,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "rand_core", "typenum", ] -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - [[package]] name = "curl" -version = "0.4.44" +version = "0.4.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "509bd11746c7ac09ebd19f0b17782eae80aadee26237658a6b4808afb5c11a22" +checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6" dependencies = [ "curl-sys", "libc", "openssl-probe", "openssl-sys", "schannel", - "socket2 0.4.10", - "winapi", + "socket2", + "windows-sys 0.52.0", ] [[package]] name = "curl-sys" -version = "0.4.70+curl-8.5.0" +version = "0.4.72+curl-8.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0333d8849afe78a4c8102a429a446bfdd055832af071945520e835ae2d841e" +checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" dependencies = [ "cc", "libc", @@ -824,34 +751,7 @@ dependencies = [ "openssl-sys", "pkg-config", "vcpkg", - "windows-sys 0.48.0", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" -dependencies = [ - "cfg-if", - "cpufeatures", - "curve25519-dalek-derive", - "fiat-crypto", - "platforms", - "rustc_version", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" -dependencies = [ - "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "windows-sys 0.52.0", ] [[package]] @@ -930,9 +830,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "encode_unicode" @@ -942,9 +842,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -1002,15 +902,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" - -[[package]] -name = "fiat-crypto" -version = "0.2.5" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "flate2" @@ -1130,8 +1024,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -1148,9 +1042,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -1200,9 +1094,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -1211,16 +1105,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval", -] - [[package]] name = "gimli" version = "0.28.1" @@ -1235,9 +1119,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "governor" -version = "0.6.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "821239e5672ff23e2a7060901fa622950bbd80b649cdaadd78d1c1767ed14eb4" +checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" dependencies = [ "cfg-if", "dashmap", @@ -1246,43 +1130,26 @@ dependencies = [ "no-std-compat", "nonzero_ext", "parking_lot", - "quanta 0.11.1", + "portable-atomic", + "quanta", "rand", "smallvec", + "spinning_top", ] [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.11", - "indexmap 2.1.0", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "h2" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31d030e59af851932b72ceebadf4a2b5986dba4c3b99dd2493f8273a0f151943" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 1.0.0", - "indexmap 2.1.0", + "http 0.2.12", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -1295,15 +1162,6 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -[[package]] -name = "hashbrown" -version = "0.13.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" -dependencies = [ - "ahash", -] - [[package]] name = "hashbrown" version = "0.14.3" @@ -1320,10 +1178,10 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" dependencies = [ - "base64", + "base64 0.21.7", "bytes", "headers-core", - "http 1.0.0", + "http 1.1.0", "httpdate", "mime", "sha1", @@ -1335,7 +1193,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" dependencies = [ - "http 1.0.0", + "http 1.1.0", ] [[package]] @@ -1344,11 +1202,17 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" -version = "0.3.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d3d0e0f38255e7fa3cf31335b3a56f05febd18025f4db5ef7a0cfb4f8da651f" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -1358,9 +1222,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1369,9 +1233,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -1385,7 +1249,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] @@ -1396,18 +1260,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] name = "http-body-util" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", - "futures-util", - "http 1.0.0", + "futures-core", + "http 1.1.0", "http-body 1.0.0", "pin-project-lite", ] @@ -1440,14 +1304,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.24", - "http 0.2.11", + "h2", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2", "tokio", "tower-service", "tracing", @@ -1456,20 +1320,20 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = 
"1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.2", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "httpdate", "itoa", "pin-project-lite", + "smallvec", "tokio", ] @@ -1488,27 +1352,25 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", - "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.2.0", "pin-project-lite", - "socket2 0.5.5", + "socket2", "tokio", - "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.59" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1549,9 +1411,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1561,9 +1423,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" dependencies = [ "console", "instant", @@ -1574,18 +1436,9 @@ dependencies = [ [[package]] name = "indoc" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" - -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array", -] +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "instant" @@ -1632,44 +1485,44 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonwebtoken" -version = "9.2.0" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ - "base64", + "base64 0.21.7", "js-sys", "pem", "ring", @@ -1692,18 +1545,18 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.5", ] [[package]] @@ -1714,13 +1567,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "libc", - "redox_syscall", ] [[package]] @@ -1740,9 +1592,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.14" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "libc", @@ -1774,15 +1626,15 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lru" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ "hashbrown 0.14.3", ] @@ -1797,15 +1649,6 @@ dependencies = [ "libc", ] -[[package]] -name = "mach2" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" -dependencies = [ - "libc", -] - [[package]] name = "matchers" version = "0.0.1" @@ -1832,9 +1675,9 @@ checksum = 
"0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memoffset" @@ -1847,9 +1690,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.22.0" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b9e10a211c839210fd7f99954bda26e5f8e26ec686ad68da6a32df7c80e782" +checksum = "2be3cbd384d4e955b231c895ce10685e3d8260c5ccffae898c96c723b0772835" dependencies = [ "ahash", "portable-atomic", @@ -1857,18 +1700,18 @@ dependencies = [ [[package]] name = "metrics-exporter-prometheus" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a4c4718a371ddfb7806378f23617876eea8b82e5ff1324516bcd283249d9ea" +checksum = "9bf4e7146e30ad172c42c39b3246864bd2d3c6396780711a1baf749cfe423e21" dependencies = [ - "base64", + "base64 0.21.7", "hyper 0.14.28", "hyper-tls", - "indexmap 1.9.3", + "indexmap 2.2.6", "ipnet", "metrics", "metrics-util", - "quanta 0.12.2", + "quanta", "thiserror", "tokio", "tracing", @@ -1876,16 +1719,16 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.16.0" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2670b8badcc285d486261e2e9f1615b506baff91427b61bd336a472b65bbf5ed" +checksum = "8b07a5eb561b8cbc16be2d216faf7757f9baf3bfb94dbb0fae3df8387a5bb47f" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.1", + "hashbrown 0.14.3", "metrics", "num_cpus", - "quanta 0.12.2", + "quanta", "sketches-ddsketch", ] @@ -1913,18 +1756,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", @@ -1955,8 +1798,8 @@ checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -2045,15 +1888,21 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -2068,19 +1917,18 @@ dependencies = [ [[package]] name = "num-integer" 
-version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -2120,17 +1968,11 @@ dependencies = [ "parking_lot_core", ] -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - [[package]] name = "open" -version = "5.0.1" +version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90878fb664448b54c4e592455ad02831e23a3f7e157374a8b95654731aac7349" +checksum = "449f0ff855d85ddbf1edd5b646d65249ead3f5e422aaa86b7d2d0b049b103e32" dependencies = [ "is-wsl", "libc", @@ -2139,11 +1981,11 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.62" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -2159,8 +2001,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -2171,9 +2013,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.98" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -2224,14 +2066,12 @@ checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" [[package]] name = "pea2pea" -version = "0.46.0" +version = "0.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1051ff6b30971947b93dc4d76f281a276200a3a0fffd95abe8274e8d92455f2" +checksum = "908db9c51d2d2a4c379bce9395cac8305a464780b0c809969001501743207191" dependencies = [ - "async-trait", "bytes", "futures-util", - "once_cell", "parking_lot", "tokio", "tokio-util", @@ -2240,9 +2080,9 @@ dependencies = [ [[package]] name = "peak_alloc" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84d3978b0aa7d46c34452384c28264ac859c652b67635f6acfd598e1b6608de5" +checksum = "29c4e8e2dd832fd76346468f822e4e600d30ba4e5aa545a128abf12cfae7ea3e" [[package]] name = "peeking_take_while" @@ -2252,11 +2092,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" 
+checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64", + "base64 0.22.0", "serde", ] @@ -2268,29 +2108,29 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -2300,38 +2140,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" - -[[package]] -name = "platforms" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" - -[[package]] -name = "poly1305" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" -dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.6.1" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug", - "universal-hash", -] +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "portable-atomic" @@ -2379,19 +2190,19 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "proc-macro2" -version = "1.0.76" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -2404,13 +2215,13 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "rusty-fork", 
"tempfile", "unarray", @@ -2418,30 +2229,14 @@ dependencies = [ [[package]] name = "quanta" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" -dependencies = [ - "crossbeam-utils", - "libc", - "mach2", - "once_cell", - "raw-cpuid 10.7.0", - "wasi", - "web-sys", - "winapi", -] - -[[package]] -name = "quanta" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" dependencies = [ "crossbeam-utils", "libc", "once_cell", - "raw-cpuid 11.0.1", + "raw-cpuid", "wasi", "web-sys", "winapi", @@ -2470,9 +2265,9 @@ checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -2532,11 +2327,11 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5659e52e4ba6e07b2dad9f1158f578ef84a73762625ddb51536019f34d180eb" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cassowary", "crossterm", "indoc", - "itertools 0.12.0", + "itertools 0.12.1", "lru", "paste", "stability", @@ -2545,29 +2340,20 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "raw-cpuid" version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", ] [[package]] name = "rayon" -version = "1.8.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -2594,9 +2380,9 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", "libredox", @@ -2605,14 +2391,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -2626,13 +2412,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -2643,23 +2429,23 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.3.24", - "http 0.2.11", + "h2", + "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", "hyper-tls", @@ -2671,9 +2457,11 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-native-tls", @@ -2687,16 +2475,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2732,11 +2521,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.30" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -2745,31 +2534,49 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" dependencies = [ "log", "ring", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pki-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.102.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" dependencies = [ "ring", + "rustls-pki-types", "untrusted", ] [[package]] name = 
"rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "rusty-fork" @@ -2797,9 +2604,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -2825,21 +2632,11 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -2850,9 +2647,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -2909,37 +2706,37 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.195" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -2947,9 +2744,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd154a240de39fdebcf5775d2675c204d7c13cf39a4c697be6493c8e734337c" +checksum = 
"af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -3048,9 +2845,9 @@ dependencies = [ [[package]] name = "sketches-ddsketch" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" +checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" [[package]] name = "slab" @@ -3063,9 +2860,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol_str" @@ -3122,7 +2919,7 @@ dependencies = [ "clap", "colored", "crossterm", - "indexmap 2.1.0", + "indexmap 2.2.6", "nix", "num_cpus", "parking_lot", @@ -3141,7 +2938,9 @@ dependencies = [ "snarkvm", "sys-info", "thiserror", + "time", "tokio", + "tracing", "tracing-subscriber 0.3.18", "ureq", "zeroize", @@ -3181,7 +2980,7 @@ dependencies = [ "colored", "deadline", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.6", "num_cpus", "once_cell", "parking_lot", @@ -3223,12 +3022,13 @@ dependencies = [ "colored", "deadline", "futures", - "indexmap 2.1.0", - "itertools 0.12.0", + "indexmap 2.2.6", + "itertools 0.12.1", "mockall", "open", "parking_lot", "paste", + "pea2pea", "proptest", "rand", "rand_chacha", @@ -3244,7 +3044,6 @@ dependencies = [ "snarkos-node-sync", "snarkos-node-tcp", "snarkvm", - "snow", "test-strategy", "time", "tokio", @@ -3262,14 +3061,13 @@ version = "2.2.7" dependencies = [ "anyhow", "bytes", - "indexmap 2.1.0", + "indexmap 2.2.6", "proptest", "rayon", "serde", "snarkos-node-metrics", "snarkos-node-sync-locators", "snarkvm", - "snow", "test-strategy", "time", "tokio-util", @@ -3281,9 +3079,11 @@ name = "snarkos-node-bft-ledger-service" version = "2.2.7" dependencies = [ "async-trait", - "indexmap 2.1.0", + "indexmap 2.2.6", + "lru", "parking_lot", "rand", + "snarkos-node-metrics", "snarkvm", "tokio", "tracing", @@ -3294,7 +3094,7 @@ name = "snarkos-node-bft-storage-service" version = "2.2.7" dependencies = [ "aleo-std", - "indexmap 2.1.0", + "indexmap 2.2.6", "parking_lot", "snarkvm", "tracing", @@ -3326,8 +3126,8 @@ dependencies = [ "aleo-std", "anyhow", "colored", - "indexmap 2.1.0", - "itertools 0.12.0", + "indexmap 2.2.6", + "itertools 0.12.1", "lru", "once_cell", "parking_lot", @@ -3348,7 +3148,10 @@ name = "snarkos-node-metrics" version = "2.2.7" dependencies = [ "metrics-exporter-prometheus", + "parking_lot", + "rayon", "snarkvm", + "time", "tokio", ] @@ -3359,8 +3162,8 @@ dependencies = [ "anyhow", "axum", "axum-extra", - "http 1.0.0", - "indexmap 2.1.0", + "http 1.1.0", + "indexmap 2.2.6", "jsonwebtoken", "once_cell", "parking_lot", @@ -3391,7 +3194,7 @@ dependencies = [ "deadline", "futures", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.6", "linked-hash-map", "parking_lot", "peak_alloc", @@ -3420,14 +3223,13 @@ version = "2.2.7" dependencies = [ "anyhow", "bytes", - "indexmap 2.1.0", + "indexmap 2.2.6", "proptest", "rayon", "serde", "snarkos-node-bft-events", "snarkos-node-sync-locators", "snarkvm", - "snow", "test-strategy", "tokio", "tokio-util", @@ -3439,13 +3241,15 @@ name = "snarkos-node-sync" version = "2.2.7" dependencies = [ "anyhow", - "indexmap 2.1.0", - "itertools 0.12.0", + "indexmap 2.2.6", + "itertools 0.12.1", "once_cell", 
"parking_lot", "rand", "serde", "snarkos-node-bft-ledger-service", + "snarkos-node-metrics", + "snarkos-node-router", "snarkos-node-sync-communication-service", "snarkos-node-sync-locators", "snarkvm", @@ -3466,7 +3270,7 @@ name = "snarkos-node-sync-locators" version = "2.2.7" dependencies = [ "anyhow", - "indexmap 2.1.0", + "indexmap 2.2.6", "serde", "snarkvm", "tracing", @@ -3489,15 +3293,15 @@ dependencies = [ [[package]] name = "snarkvm" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "anstyle", "anyhow", "clap", "colored", "dotenvy", - "indexmap 2.1.0", + "indexmap 2.2.6", "num-format", "once_cell", "parking_lot", @@ -3520,8 +3324,8 @@ dependencies = [ [[package]] name = "snarkvm-algorithms" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "anyhow", @@ -3530,7 +3334,7 @@ dependencies = [ "fxhash", "hashbrown 0.14.3", "hex", - "indexmap 2.1.0", + "indexmap 2.2.6", "itertools 0.11.0", "num-traits", "parking_lot", @@ -3550,8 +3354,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-account", "snarkvm-circuit-algorithms", @@ -3564,8 +3368,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-account" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-algorithms", "snarkvm-circuit-network", @@ -3575,8 +3379,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-algorithms" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-types", "snarkvm-console-algorithms", @@ -3585,8 +3389,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-collections" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-algorithms", "snarkvm-circuit-types", @@ -3595,10 +3399,10 @@ dependencies = [ [[package]] name = "snarkvm-circuit-environment" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "itertools 0.11.0", "nom", "num-traits", @@ -3613,13 +3417,13 @@ dependencies = [ [[package]] name = 
"snarkvm-circuit-environment-witness" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" [[package]] name = "snarkvm-circuit-network" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-algorithms", "snarkvm-circuit-collections", @@ -3629,8 +3433,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-program" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "paste", "snarkvm-circuit-account", @@ -3644,8 +3448,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-address", @@ -3659,8 +3463,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-address" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3672,8 +3476,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-boolean" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-console-types-boolean", @@ -3681,8 +3485,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-field" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3691,8 +3495,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-group" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-circuit-environment", "snarkvm-circuit-types-boolean", @@ -3703,8 +3507,8 @@ dependencies = [ [[package]] name = "snarkvm-circuit-types-integers" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" 
dependencies = [
 "snarkvm-circuit-environment",
 "snarkvm-circuit-types-boolean",

@@ -3715,8 +3519,8 @@
dependencies = [

[[package]]
name = "snarkvm-circuit-types-scalar"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-circuit-environment",
 "snarkvm-circuit-types-boolean",

@@ -3726,8 +3530,8 @@
dependencies = [

[[package]]
name = "snarkvm-circuit-types-string"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-circuit-environment",
 "snarkvm-circuit-types-boolean",

@@ -3738,8 +3542,8 @@
dependencies = [

[[package]]
name = "snarkvm-console"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-account",
 "snarkvm-console-algorithms",

@@ -3751,8 +3555,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-account"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "bs58",
 "snarkvm-console-network",

@@ -3762,8 +3566,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-algorithms"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "blake2s_simd",
 "smallvec",

@@ -3775,8 +3579,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-collections"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "aleo-std",
 "rayon",

@@ -3786,11 +3590,11 @@
dependencies = [

[[package]]
name = "snarkvm-console-network"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "anyhow",
- "indexmap 2.1.0",
+ "indexmap 2.2.6",
 "itertools 0.11.0",
 "lazy_static",
 "once_cell",

@@ -3809,8 +3613,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-network-environment"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "anyhow",
 "bech32",

@@ -3827,12 +3631,12 @@
dependencies = [

[[package]]
name = "snarkvm-console-program"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "enum_index",
 "enum_index_derive",
- "indexmap 2.1.0",
+ "indexmap 2.2.6",
 "num-derive",
 "num-traits",
 "once_cell",

@@ -3848,8 +3652,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-types"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
 "snarkvm-console-types-address",

@@ -3863,8 +3667,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-types-address"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
 "snarkvm-console-types-boolean",

@@ -3874,16 +3678,16 @@
dependencies = [

[[package]]
name = "snarkvm-console-types-boolean"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
]

[[package]]
name = "snarkvm-console-types-field"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
 "snarkvm-console-types-boolean",

@@ -3892,8 +3696,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-types-group"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
 "snarkvm-console-types-boolean",

@@ -3903,8 +3707,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-types-integers"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
 "snarkvm-console-types-boolean",

@@ -3914,8 +3718,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-types-scalar"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19"
+source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a"
dependencies = [
 "snarkvm-console-network-environment",
 "snarkvm-console-types-boolean",

@@ -3925,8 +3729,8 @@
dependencies = [

[[package]]
name = "snarkvm-console-types-string"
-version = "0.16.18"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a"
+version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-console-network-environment", "snarkvm-console-types-boolean", @@ -3936,8 +3740,8 @@ dependencies = [ [[package]] name = "snarkvm-curves" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "rand", "rayon", @@ -3950,8 +3754,8 @@ dependencies = [ [[package]] name = "snarkvm-fields" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "anyhow", @@ -3967,21 +3771,21 @@ dependencies = [ [[package]] name = "snarkvm-ledger" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "anyhow", - "indexmap 2.1.0", + "indexmap 2.2.6", "parking_lot", "rand", "rayon", "snarkvm-console", "snarkvm-ledger-authority", "snarkvm-ledger-block", - "snarkvm-ledger-coinbase", "snarkvm-ledger-committee", "snarkvm-ledger-narwhal", + "snarkvm-ledger-puzzle", "snarkvm-ledger-query", "snarkvm-ledger-store", "snarkvm-ledger-test-helpers", @@ -3992,8 +3796,8 @@ dependencies = [ [[package]] name = "snarkvm-ledger-authority" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "anyhow", "rand", @@ -4004,64 +3808,46 @@ dependencies = [ [[package]] name = "snarkvm-ledger-block" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "rayon", "serde_json", "snarkvm-console", "snarkvm-ledger-authority", - "snarkvm-ledger-coinbase", "snarkvm-ledger-committee", "snarkvm-ledger-narwhal-batch-header", "snarkvm-ledger-narwhal-subdag", "snarkvm-ledger-narwhal-transmission-id", + "snarkvm-ledger-puzzle", "snarkvm-synthesizer-program", "snarkvm-synthesizer-snark", ] -[[package]] -name = "snarkvm-ledger-coinbase" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" -dependencies = [ - "aleo-std", - "anyhow", - "bincode", - "blake2", - "indexmap 2.1.0", - "rayon", - "serde_json", - "snarkvm-algorithms", - "snarkvm-console", - "snarkvm-curves", - "snarkvm-fields", - "snarkvm-synthesizer-snark", - "snarkvm-utilities", -] - [[package]] name = "snarkvm-ledger-committee" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "anyhow", - "indexmap 2.1.0", + 
"indexmap 2.2.6", "proptest", "rand", "rand_chacha", "rand_distr", + "rayon", "serde_json", "snarkvm-console", + "snarkvm-ledger-narwhal-batch-header", "snarkvm-metrics", "test-strategy", ] [[package]] name = "snarkvm-ledger-narwhal" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-ledger-narwhal-batch-certificate", "snarkvm-ledger-narwhal-batch-header", @@ -4073,10 +3859,10 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-batch-certificate" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "rayon", "serde_json", "snarkvm-console", @@ -4086,10 +3872,11 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-batch-header" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", + "rayon", "serde_json", "snarkvm-console", "snarkvm-ledger-narwhal-transmission-id", @@ -4098,8 +3885,8 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-data" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "bytes", "serde_json", @@ -4109,13 +3896,14 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-subdag" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "rayon", "serde_json", "snarkvm-console", + "snarkvm-ledger-committee", "snarkvm-ledger-narwhal-batch-certificate", "snarkvm-ledger-narwhal-batch-header", "snarkvm-ledger-narwhal-transmission-id", @@ -4123,30 +3911,65 @@ dependencies = [ [[package]] name = "snarkvm-ledger-narwhal-transmission" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "bytes", "serde_json", "snarkvm-console", "snarkvm-ledger-block", - "snarkvm-ledger-coinbase", "snarkvm-ledger-narwhal-data", + "snarkvm-ledger-puzzle", ] [[package]] name = "snarkvm-ledger-narwhal-transmission-id" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "snarkvm-console", - "snarkvm-ledger-coinbase", + "snarkvm-ledger-puzzle", +] + +[[package]] +name = "snarkvm-ledger-puzzle" +version = "0.16.19" +source = 
"git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" +dependencies = [ + "aleo-std", + "anyhow", + "bincode", + "indexmap 2.2.6", + "lru", + "once_cell", + "parking_lot", + "rand", + "rand_chacha", + "rayon", + "serde_json", + "snarkvm-algorithms", + "snarkvm-console", +] + +[[package]] +name = "snarkvm-ledger-puzzle-epoch" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" +dependencies = [ + "anyhow", + "colored", + "indexmap 2.2.6", + "rand", + "rand_chacha", + "rayon", + "snarkvm-console", + "snarkvm-ledger-puzzle", ] [[package]] name = "snarkvm-ledger-query" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "async-trait", "reqwest", @@ -4158,24 +3981,26 @@ dependencies = [ [[package]] name = "snarkvm-ledger-store" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std-storage", "anyhow", "bincode", - "indexmap 2.1.0", + "indexmap 2.2.6", "once_cell", "parking_lot", "rayon", "rocksdb", "serde", + "serde_json", + "smallvec", "snarkvm-console", "snarkvm-ledger-authority", "snarkvm-ledger-block", - "snarkvm-ledger-coinbase", "snarkvm-ledger-committee", "snarkvm-ledger-narwhal-batch-certificate", + "snarkvm-ledger-puzzle", "snarkvm-synthesizer-program", "snarkvm-synthesizer-snark", "tracing", @@ -4183,8 +4008,8 @@ dependencies = [ [[package]] name = "snarkvm-ledger-test-helpers" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "once_cell", "snarkvm-circuit", @@ -4198,8 +4023,8 @@ dependencies = [ [[package]] name = "snarkvm-metrics" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "metrics", "metrics-exporter-prometheus", @@ -4207,8 +4032,8 @@ dependencies = [ [[package]] name = "snarkvm-parameters" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "anyhow", @@ -4217,7 +4042,7 @@ dependencies = [ "colored", "curl", "hex", - "indexmap 2.1.0", + "indexmap 2.2.6", "itertools 0.11.0", "lazy_static", "parking_lot", @@ -4232,12 +4057,14 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "anyhow", - "indexmap 2.1.0", + "indexmap 2.2.6", + "itertools 0.11.0", + "lru", 
"parking_lot", "rand", "rayon", @@ -4245,24 +4072,26 @@ dependencies = [ "snarkvm-circuit", "snarkvm-console", "snarkvm-ledger-block", - "snarkvm-ledger-coinbase", "snarkvm-ledger-committee", + "snarkvm-ledger-puzzle", + "snarkvm-ledger-puzzle-epoch", "snarkvm-ledger-query", "snarkvm-ledger-store", "snarkvm-synthesizer-process", "snarkvm-synthesizer-program", "snarkvm-synthesizer-snark", + "snarkvm-utilities", "tracing", ] [[package]] name = "snarkvm-synthesizer-process" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "colored", - "indexmap 2.1.0", + "indexmap 2.2.6", "once_cell", "parking_lot", "rand", @@ -4280,10 +4109,10 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer-program" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.6", "paste", "rand", "rand_chacha", @@ -4294,8 +4123,8 @@ dependencies = [ [[package]] name = "snarkvm-synthesizer-snark" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "bincode", "once_cell", @@ -4307,8 +4136,8 @@ dependencies = [ [[package]] name = "snarkvm-utilities" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "aleo-std", "anyhow", @@ -4328,48 +4157,22 @@ dependencies = [ [[package]] name = "snarkvm-utilities-derives" -version = "0.16.18" -source = "git+https://github.com/AleoHQ/snarkVM.git?rev=2127981#2127981d299636c4d0061aa2a5f69bd49fb4d19a" +version = "0.16.19" +source = "git+https://github.com/AleoNet/snarkVM.git?rev=140ff26#140ff26f87697c2e9d18212cce2cc831fc4b146a" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", -] - -[[package]] -name = "snow" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" -dependencies = [ - "aes-gcm", - "blake2", - "chacha20poly1305", - "curve25519-dalek", - "rand_core", - "rustc_version", - "sha2", - "subtle", -] - -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -4378,21 +4181,30 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + [[package]] name = "stability" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" dependencies = [ - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "structmeta" @@ -4401,9 +4213,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ad9e09554f0456d67a69c1584c9798ba733a5b50349a6c0d0948710523922d" dependencies = [ "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "structmeta-derive", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -4413,8 +4225,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -4432,11 +4244,11 @@ version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "rustversion", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -4463,18 +4275,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.48" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", - "quote 1.0.35", + "quote 1.0.36", "unicode-ident", ] @@ -4484,6 +4296,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "synom" version = "0.11.3" @@ -4526,13 +4344,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.1", - "redox_syscall", + "fastrand 2.0.2", "rustix", "windows-sys 0.52.0", ] @@ -4550,36 +4367,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8361c808554228ad09bfed70f5c823caf8a3450b6881cc3a38eb57e8c08c1d9" dependencies = [ "proc-macro2", - "quote 1.0.35", + 
"quote 1.0.36", "structmeta", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -4607,12 +4424,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -4627,10 +4445,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -4660,9 +4479,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -4672,7 +4491,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -4684,8 +4503,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -4700,9 +4519,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -4711,9 +4530,9 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +checksum = 
"2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" dependencies = [ "async-stream", "bytes", @@ -4763,14 +4582,14 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da193277a4e2c33e59e09b5861580c33dd0a637c3883d0fa74ba40c0374af2e" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "http-body-util", "http-range-header", @@ -4800,14 +4619,14 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tower_governor" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d31d2cd0776b0e10664d3db2e362b9a8b38a18cb09ba97d3f2f775c54f2c51b" +checksum = "3790eac6ad3fb8d9d96c2b040ae06e2517aa24b067545d1078b96ae72f7bb9a7" dependencies = [ "axum", "forwarded-header-value", "governor", - "http 1.0.0", + "http 1.1.0", "pin-project", "thiserror", "tower", @@ -4833,8 +4652,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -4950,7 +4769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c49adbab879d2e0dd7f75edace5f0ac2156939ecb7e6a1e8fa14e53728328c48" dependencies = [ "lazy_static", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -4961,7 +4780,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08" dependencies = [ "lazy_static", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -5006,18 +4825,18 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" @@ -5031,16 +4850,6 @@ version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "untrusted" version = "0.9.0" @@ -5049,15 +4858,16 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.9.1" +version = "2.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cdd25c339e200129fe4de81451814e5228c9b771d57378817d6117cc2b3f97" +checksum = 
"11f214ce18d8b2cbe84ed3aa6486ed3f5b285cf8d8fbdbce9f3f767a724adc35" dependencies = [ - "base64", + "base64 0.21.7", "flate2", "log", "once_cell", "rustls", + "rustls-pki-types", "rustls-webpki", "serde", "serde_json", @@ -5117,9 +4927,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -5142,9 +4952,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -5152,24 +4962,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.40" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bde2032aeb86bdfaecc8b261eef3cba735cc426c1f3a3416d1e0791be95fc461" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -5179,38 +4989,38 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ - "quote 1.0.35", + "quote 1.0.36", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.67" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -5218,9 +5028,12 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.3" +version = "0.26.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "winapi" @@ -5259,7 +5072,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -5277,7 +5090,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -5297,17 +5110,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -5318,9 +5132,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -5330,9 +5144,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -5342,9 +5156,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -5354,9 +5174,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum 
= "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -5366,9 +5186,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -5378,9 +5198,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -5390,9 +5210,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winreg" @@ -5420,8 +5240,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] [[package]] @@ -5440,6 +5260,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", - "quote 1.0.35", - "syn 2.0.48", + "quote 1.0.36", + "syn 2.0.58", ] diff --git a/Cargo.toml b/Cargo.toml index b61efc7e7d..c2c1ada9bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ version = "2.2.7" authors = [ "The Aleo Team " ] description = "A decentralized operating system" homepage = "https://aleo.org" -repository = "https://github.com/AleoHQ/snarkOS" +repository = "https://github.com/AleoNet/snarkOS" keywords = [ "aleo", "cryptography", @@ -15,7 +15,7 @@ keywords = [ categories = [ "cryptography", "operating-systems" ] license = "Apache-2.0" edition = "2021" -rust-version = "1.74.1" +rust-version = "1.76.0" # Attention - Change the MSRV in rust-toolchain and in .circleci/config.yml as well [workspace] members = [ @@ -45,8 +45,8 @@ version = "=0.1.24" default-features = false [workspace.dependencies.snarkvm] -git = "https://github.com/AleoHQ/snarkVM.git" -rev = "2127981" +git = "https://github.com/AleoNet/snarkVM.git" +rev = "fddd8b9" #version = "=0.16.18" features = [ "circuit", "console", "rocks" ] @@ -55,7 +55,6 @@ name = "snarkos" path = "snarkos/main.rs" [features] -jemalloc = [ "tikv-jemallocator" ] metrics = [ "snarkos-node-metrics", "snarkos-node/metrics" ] [dependencies.anyhow] @@ -120,9 +119,8 @@ version = "=2.2.7" path = "./node/tcp" version = "=2.2.7" -[dependencies.tikv-jemallocator] -version = "0.5" -optional = true +[target.'cfg(all(target_os = "linux", target_arch = "x86_64"))'.dependencies] +tikv-jemallocator = "0.5" [dev-dependencies.rusty-hook] version = "0.11.2" diff --git a/README.md b/README.md index 432e8f31fd..4f90a2aaa8 100644 --- a/README.md +++ b/README.md @@ -17,8 +17,8 
@@ * [2.1 Requirements](#21-requirements) * [2.2 Installation](#22-installation) * [3. Run an Aleo Node](#3-run-an-aleo-node) - * [3a. Run an Aleo Client](#3a-run-an-aleo-client) - * [3b. Run an Aleo Prover](#3b-run-an-aleo-prover) + * [3.1 Run an Aleo Client](#31-run-an-aleo-client) + * [3.2 Run an Aleo Prover](#32-run-an-aleo-prover) * [4. FAQs](#4-faqs) * [5. Command Line Interface](#5-command-line-interface) * [6. Development Guide](#6-development-guide) @@ -92,11 +92,11 @@ Lastly, install `snarkOS`: cargo install --locked --path . ``` -Please ensure ports `4133/tcp` and `3033/tcp` are open on your router and OS firewall. +Please ensure ports `4130/tcp` and `3030/tcp` are open on your router and OS firewall. ## 3. Run an Aleo Node -## 3a. Run an Aleo Client +## 3.1 Run an Aleo Client Start by following the instructions in the [Build Guide](#2-build-guide). @@ -105,7 +105,7 @@ Next, to start a client node, from the `snarkOS` directory, run: ./run-client.sh ``` -## 3b. Run an Aleo Prover +## 3.2 Run an Aleo Prover Start by following the instructions in the [Build Guide](#2-build-guide). @@ -144,7 +144,7 @@ APrivateKey1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ### 2. My node is unable to connect to peers on the network. -- Ensure ports `4133/tcp` and `3033/tcp` are open on your router and OS firewall. +- Ensure ports `4130/tcp` and `3030/tcp` are open on your router and OS firewall. - Ensure `snarkOS` is started using `./run-client.sh` or `./run-prover.sh`. ### 3. I can't generate a new address ### @@ -200,10 +200,10 @@ OPTIONS: --private-key Specify the node's account private key --private-key-file Specify the path to a file containing the node's account private key - --node Specify the IP address and port for the node server [default: 0.0.0.0:4133] + --node Specify the IP address and port for the node server [default: 0.0.0.0:4130] --connect Specify the IP address and port of a peer to connect to - --rest Specify the IP address and port for the REST server [default: 0.0.0.0:3033] + --rest Specify the IP address and port for the REST server [default: 0.0.0.0:3030] --norest If the flag is set, the node will not initialize the REST server --nodisplay If the flag is set, the node will not render the display diff --git a/account/src/lib.rs b/account/src/lib.rs index 11270fe443..684db130ed 100644 --- a/account/src/lib.rs +++ b/account/src/lib.rs @@ -162,9 +162,9 @@ impl Display for Account { #[cfg(test)] mod tests { use super::*; - use snarkvm::prelude::Testnet3; + use snarkvm::prelude::MainnetV0; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; #[test] fn test_sign() { diff --git a/build_ubuntu.sh b/build_ubuntu.sh index 53cad73e4a..84d7379c51 100755 --- a/build_ubuntu.sh +++ b/build_ubuntu.sh @@ -31,12 +31,12 @@ source $HOME/.cargo/env cargo install --locked --path . echo "==================================================" -echo " Attention - Please ensure ports 4133 and 3033" +echo " Attention - Please ensure ports 4130 and 3030" echo " are enabled on your local network." echo "" -echo " Cloud Providers - Enable ports 4133 and 3033" +echo " Cloud Providers - Enable ports 4130 and 3030" echo " in your network firewall" echo "" echo " Home Users - Enable port forwarding or NAT rules" -echo " for 4133 and 3033 on your router." +echo " for 4130 and 3030 on your router." 
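Editor's note: the default ports move from `4133`/`3033` to `4130`/`3030` throughout this diff. A standalone, illustrative check (not part of the repository) that the new ports are free locally before starting a node; router and firewall forwarding still has to be verified separately, as the README says:

```rust
use std::net::TcpListener;

fn main() {
    for port in [4130u16, 3030] {
        // Binding succeeds only if nothing else is already listening here.
        match TcpListener::bind(("0.0.0.0", port)) {
            Ok(_listener) => println!("port {port}/tcp is free"),
            Err(err) => println!("port {port}/tcp is unavailable: {err}"),
        }
    }
}
```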
echo "==================================================" diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 0ad106cb6e..7fdb352b4f 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -103,6 +103,9 @@ workspace = true [dependencies.sys-info] version = "0.9" +[dependencies.time] +version = "0.3" + [dependencies.thiserror] version = "1.0" @@ -110,6 +113,9 @@ version = "1.0" version = "1.28" features = [ "rt" ] +[dependencies.tracing] +version = "0.1" + [dependencies.tracing-subscriber] version = "0.3" features = [ "env-filter" ] diff --git a/cli/src/commands/account.rs b/cli/src/commands/account.rs index 61f64fa3e1..108b866723 100644 --- a/cli/src/commands/account.rs +++ b/cli/src/commands/account.rs @@ -15,6 +15,7 @@ use snarkvm::{ console::{ account::{Address, PrivateKey, Signature}, + network::{MainnetV0, Network, TestnetV0}, prelude::{Environment, Uniform}, program::{ToFields, Value}, types::Field, @@ -36,13 +37,14 @@ use std::{ }; use zeroize::Zeroize; -type Network = snarkvm::prelude::Testnet3; - /// Commands to manage Aleo accounts. #[derive(Debug, Parser, Zeroize)] pub enum Account { /// Generates a new Aleo account New { + /// Specify the network of the account + #[clap(default_value = "0", long = "network")] + network: u16, /// Seed the RNG with a numeric value #[clap(short = 's', long)] seed: Option, @@ -54,6 +56,9 @@ pub enum Account { discreet: bool, }, Sign { + /// Specify the network of the private key to sign with + #[clap(default_value = "0", long = "network")] + network: u16, /// Specify the account private key of the node #[clap(long = "private-key")] private_key: Option, @@ -71,6 +76,9 @@ pub enum Account { raw: bool, }, Verify { + /// Specify the network of the signature to verify + #[clap(default_value = "0", long = "network")] + network: u16, /// Address to use for verification #[clap(short = 'a', long)] address: String, @@ -87,29 +95,35 @@ pub enum Account { } /// Parse a raw Aleo input into fields -fn aleo_literal_to_fields(input: &str) -> Result>> { - Value::::from_str(input)?.to_fields() +fn aleo_literal_to_fields(input: &str) -> Result>> { + Value::::from_str(input)?.to_fields() } impl Account { pub fn parse(self) -> Result { match self { - Self::New { seed, vanity, discreet } => { + Self::New { network, seed, vanity, discreet } => { // Ensure only the seed or the vanity string is specified. if seed.is_some() && vanity.is_some() { bail!("Cannot specify both the '--seed' and '--vanity' flags"); } - // Generate a vanity account. - if let Some(vanity) = vanity { - Self::new_vanity(&vanity, discreet) - } - // Default to generating a normal account, with an optional seed. - else { - Self::new_seeded(seed, discreet) + match vanity { + // Generate a vanity account for the specified network. + Some(vanity) => match network { + MainnetV0::ID => Self::new_vanity::(vanity.as_str(), discreet), + TestnetV0::ID => Self::new_vanity::(vanity.as_str(), discreet), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }, + // Generate a seeded account for the specified network. 
+ None => match network { + MainnetV0::ID => Self::new_seeded::(seed, discreet), + TestnetV0::ID => Self::new_seeded::(seed, discreet), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }, } } - Self::Sign { message, seed, raw, private_key, private_key_file } => { + Self::Sign { network, message, seed, raw, private_key, private_key_file } => { let key = match (private_key, private_key_file) { (Some(private_key), None) => private_key, (None, Some(private_key_file)) => { @@ -121,16 +135,29 @@ impl Account { bail!("Cannot specify both the '--private-key' and '--private-key-file' flags") } }; - Self::sign(key, message, seed, raw) + + // Sign the message for the specified network. + match network { + MainnetV0::ID => Self::sign::(key, message, seed, raw), + TestnetV0::ID => Self::sign::(key, message, seed, raw), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } + } + Self::Verify { network, address, signature, message, raw } => { + // Verify the signature for the specified network. + match network { + MainnetV0::ID => Self::verify::(address, signature, message, raw), + TestnetV0::ID => Self::verify::(address, signature, message, raw), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } } - Self::Verify { address, signature, message, raw } => Self::verify(address, signature, message, raw), } } /// Generates a new Aleo account with the given vanity string. - fn new_vanity(vanity: &str, discreet: bool) -> Result { + fn new_vanity(vanity: &str, discreet: bool) -> Result { // A closure to generate a new Aleo account. - let sample_account = || snarkos_account::Account::::new(&mut rand::thread_rng()); + let sample_account = || snarkos_account::Account::::new(&mut rand::thread_rng()); const ITERATIONS: u128 = u16::MAX as u128; const ITERATIONS_STR: &str = "65,535"; @@ -201,12 +228,12 @@ impl Account { } /// Generates a new Aleo account with an optional seed. - fn new_seeded(seed: Option, discreet: bool) -> Result { + fn new_seeded(seed: Option, discreet: bool) -> Result { // Recover the seed. let seed = match seed { // Recover the field element deterministically. Some(seed) => { - Field::new(::Field::from_str(&seed).map_err(|e| anyhow!("Invalid seed - {e}"))?) + Field::new(::Field::from_str(&seed).map_err(|e| anyhow!("Invalid seed - {e}"))?) } // Sample a random field element. None => Field::rand(&mut ChaChaRng::from_entropy()), @@ -215,7 +242,7 @@ impl Account { let private_key = PrivateKey::try_from(seed).map_err(|_| anyhow!("Failed to convert the seed into a valid private key"))?; // Construct the account. - let account = snarkos_account::Account::::try_from(private_key)?; + let account = snarkos_account::Account::::try_from(private_key)?; // Print the new Aleo account. if !discreet { return Ok(account.to_string()); @@ -236,13 +263,13 @@ impl Account { } // Sign a message with an Aleo private key - fn sign(key: String, message: String, seed: Option, raw: bool) -> Result { + fn sign(key: String, message: String, seed: Option, raw: bool) -> Result { // Recover the seed. let mut rng = match seed { // Recover the field element deterministically. 
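Editor's note: the rewritten `Account::parse` above dispatches on the runtime `--network` id and monomorphizes exactly once at the CLI boundary. A self-contained reduction of that pattern, with stand-in types (snarkvm's real `Network` trait carries far more than an ID, and testnet's ID of 1 is assumed here):

```rust
use anyhow::{bail, Result};

trait Network {
    const ID: u16;
    const NAME: &'static str;
}
struct MainnetV0;
impl Network for MainnetV0 {
    const ID: u16 = 0;
    const NAME: &'static str = "mainnet";
}
struct TestnetV0;
impl Network for TestnetV0 {
    const ID: u16 = 1;
    const NAME: &'static str = "testnet";
}

// The generic worker: everything network-specific hangs off `N`.
fn new_account<N: Network>() -> Result<String> {
    Ok(format!("generated an account on {}", N::NAME))
}

// The boundary: `--network` stays a plain u16 and is matched against the
// associated ID constants, instead of threading a runtime value everywhere.
fn parse(network: u16) -> Result<String> {
    match network {
        MainnetV0::ID => new_account::<MainnetV0>(),
        TestnetV0::ID => new_account::<TestnetV0>(),
        unknown_id => bail!("Unknown network ID ({unknown_id})"),
    }
}

fn main() -> Result<()> {
    println!("{}", parse(0)?);
    Ok(())
}
```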
Some(seed) => { - let field: Field<_> = Field::::new( - ::Field::from_str(&seed).map_err(|e| anyhow!("Invalid seed - {e}"))?, + let field: Field<_> = Field::::new( + ::Field::from_str(&seed).map_err(|e| anyhow!("Invalid seed - {e}"))?, ); // field is always 32 bytes @@ -254,13 +281,13 @@ impl Account { // Parse the private key let private_key = - PrivateKey::::from_str(&key).map_err(|_| anyhow!("Failed to parse a valid private key"))?; + PrivateKey::::from_str(&key).map_err(|_| anyhow!("Failed to parse a valid private key"))?; // Sign the message let signature = if raw { private_key.sign_bytes(message.as_bytes(), &mut rng) } else { let fields = - aleo_literal_to_fields(&message).map_err(|_| anyhow!("Failed to parse a valid Aleo literal"))?; + aleo_literal_to_fields::(&message).map_err(|_| anyhow!("Failed to parse a valid Aleo literal"))?; private_key.sign(&fields, &mut rng) } .map_err(|_| anyhow!("Failed to sign the message"))? @@ -270,12 +297,12 @@ impl Account { } // Verify a signature with an Aleo address - fn verify(address: String, signature: String, message: String, raw: bool) -> Result { + fn verify(address: String, signature: String, message: String, raw: bool) -> Result { // Parse the address - let address = Address::::from_str(&address).map_err(|_| anyhow!("Failed to parse a valid address"))?; + let address = Address::::from_str(&address).map_err(|_| anyhow!("Failed to parse a valid address"))?; // Parse the signature let signature = - Signature::::from_str(&signature).map_err(|_| anyhow!("Failed to parse a valid signature"))?; + Signature::::from_str(&signature).map_err(|_| anyhow!("Failed to parse a valid signature"))?; // Verify the signature let verified = if raw { signature.verify_bytes(&address, message.as_bytes()) @@ -323,7 +350,7 @@ mod tests { #[test] fn test_new() { for _ in 0..3 { - let account = Account::New { seed: None, vanity: None, discreet: false }; + let account = Account::New { network: 0, seed: None, vanity: None, discreet: false }; assert!(account.parse().is_ok()); } } @@ -349,7 +376,7 @@ mod tests { ); let vanity = None; - let account = Account::New { seed, vanity, discreet: false }; + let account = Account::New { network: 0, seed, vanity, discreet: false }; let actual = account.parse().unwrap(); assert_eq!(expected, actual); } @@ -375,7 +402,7 @@ mod tests { ); let vanity = None; - let account = Account::New { seed, vanity, discreet: false }; + let account = Account::New { network: 0, seed, vanity, discreet: false }; let actual = account.parse().unwrap(); assert_eq!(expected, actual); } @@ -384,7 +411,14 @@ mod tests { fn test_signature_raw() { let key = "APrivateKey1zkp61PAYmrYEKLtRWeWhUoDpFnGLNuHrCciSqN49T86dw3p".to_string(); let message = "Hello, world!".to_string(); - let account = Account::Sign { private_key: Some(key), private_key_file: None, message, seed: None, raw: true }; + let account = Account::Sign { + network: 0, + private_key: Some(key), + private_key_file: None, + message, + seed: None, + raw: true, + }; assert!(account.parse().is_ok()); } @@ -392,7 +426,14 @@ mod tests { fn test_signature() { let key = "APrivateKey1zkp61PAYmrYEKLtRWeWhUoDpFnGLNuHrCciSqN49T86dw3p".to_string(); let message = "5field".to_string(); - let account = Account::Sign { private_key: Some(key), private_key_file: None, message, seed: None, raw: false }; + let account = Account::Sign { + network: 0, + private_key: Some(key), + private_key_file: None, + message, + seed: None, + raw: false, + }; assert!(account.parse().is_ok()); } @@ -400,7 +441,14 @@ mod tests { fn 
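Editor's note: in `sign` above, an optional numeric seed yields a deterministic RNG so signatures are reproducible in tests, while no seed falls back to OS entropy. A simplified sketch of that choice; the real code derives the 32 seed bytes from a parsed field element ("field is always 32 bytes"), whereas a `u64` seed keeps this self-contained:

```rust
use rand::SeedableRng;
use rand_chacha::ChaChaRng;

// Deterministic when seeded, entropy-backed otherwise.
fn signing_rng(seed: Option<u64>) -> ChaChaRng {
    match seed {
        Some(seed) => ChaChaRng::seed_from_u64(seed),
        None => ChaChaRng::from_entropy(),
    }
}

fn main() {
    // The same seed always produces the same RNG stream, hence the fixed
    // expected signatures in the seeded tests.
    let _rng = signing_rng(Some(42));
}
```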
test_signature_fail() { let key = "APrivateKey1zkp61PAYmrYEKLtRWeWhUoDpFnGLNuHrCciSqN49T86dw3p".to_string(); let message = "not a literal value".to_string(); - let account = Account::Sign { private_key: Some(key), private_key_file: None, message, seed: None, raw: false }; + let account = Account::Sign { + network: 0, + private_key: Some(key), + private_key_file: None, + message, + seed: None, + raw: false, + }; assert!(account.parse().is_err()); } @@ -410,7 +458,8 @@ mod tests { let key = "APrivateKey1zkp61PAYmrYEKLtRWeWhUoDpFnGLNuHrCciSqN49T86dw3p".to_string(); let message = "Hello, world!".to_string(); let expected = "sign1t2hsaqfhcgvsfg2q3q2stxsffyrvdx98pl0ddkdqngqqtn3vsuprhkv9tkeyzs878ccqp62mfptvvp7m5hjcfnf06cc9pu4khxtkkp8esm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkksrwh0"; - let account = Account::Sign { private_key: Some(key), private_key_file: None, message, seed, raw: true }; + let account = + Account::Sign { network: 0, private_key: Some(key), private_key_file: None, message, seed, raw: true }; let actual = account.parse().unwrap(); assert_eq!(expected, actual); } @@ -421,7 +470,8 @@ mod tests { let key = "APrivateKey1zkp61PAYmrYEKLtRWeWhUoDpFnGLNuHrCciSqN49T86dw3p".to_string(); let message = "5field".to_string(); let expected = "sign16f464jk7zrq0az5jne2zvamhlfkksfj23508tqvmj836jpplkuqefcshgk8k8rx9xxu284fuwaua7fcz3jajvnqynwtymfm0p692vq8esm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qk3re27j"; - let account = Account::Sign { private_key: Some(key), private_key_file: None, message, seed, raw: false }; + let account = + Account::Sign { network: 0, private_key: Some(key), private_key_file: None, message, seed, raw: false }; let actual = account.parse().unwrap(); assert_eq!(expected, actual); } @@ -432,14 +482,14 @@ mod tests { let address = "aleo1zecnqchckrzw7dlsyf65g6z5le2rmys403ecwmcafrag0e030yxqrnlg8j"; let signature = "sign1nnvrjlksrkxdpwsrw8kztjukzhmuhe5zf3srk38h7g32u4kqtqpxn3j5a6k8zrqcfx580a96956nsjvluzt64cqf54pdka9mgksfqp8esm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkwsnaqq".to_string(); let message = "Hello, world!".to_string(); - let account = Account::Verify { address: address.to_string(), signature, message, raw: true }; + let account = Account::Verify { network: 0, address: address.to_string(), signature, message, raw: true }; let actual = account.parse(); assert!(actual.is_ok()); // test signature of "Hello, world!" 
against the message "Different Message" let signature = "sign1nnvrjlksrkxdpwsrw8kztjukzhmuhe5zf3srk38h7g32u4kqtqpxn3j5a6k8zrqcfx580a96956nsjvluzt64cqf54pdka9mgksfqp8esm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkwsnaqq".to_string(); let message = "Different Message".to_string(); - let account = Account::Verify { address: address.to_string(), signature, message, raw: true }; + let account = Account::Verify { network: 0, address: address.to_string(), signature, message, raw: true }; let actual = account.parse(); assert!(actual.is_err()); @@ -447,14 +497,14 @@ mod tests { let signature = "sign1nnvrjlksrkxdpwsrw8kztjukzhmuhe5zf3srk38h7g32u4kqtqpxn3j5a6k8zrqcfx580a96956nsjvluzt64cqf54pdka9mgksfqp8esm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkwsnaqq".to_string(); let message = "Hello, world!".to_string(); let wrong_address = "aleo1uxl69laseuv3876ksh8k0nd7tvpgjt6ccrgccedpjk9qwyfensxst9ftg5".to_string(); - let account = Account::Verify { address: wrong_address, signature, message, raw: true }; + let account = Account::Verify { network: 0, address: wrong_address, signature, message, raw: true }; let actual = account.parse(); assert!(actual.is_err()); // test a valid signature of "Different Message" let signature = "sign1424ztyt9hcm77nq450gvdszrvtg9kvhc4qadg4nzy9y0ah7wdqq7t36cxal42p9jj8e8pjpmc06lfev9nvffcpqv0cxwyr0a2j2tjqlesm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qk3yrr50".to_string(); let message = "Different Message".to_string(); - let account = Account::Verify { address: address.to_string(), signature, message, raw: true }; + let account = Account::Verify { network: 0, address: address.to_string(), signature, message, raw: true }; let actual = account.parse(); assert!(actual.is_ok()); } @@ -465,14 +515,14 @@ mod tests { let address = "aleo1zecnqchckrzw7dlsyf65g6z5le2rmys403ecwmcafrag0e030yxqrnlg8j"; let signature = "sign1j7swjfnyujt2vme3ulu88wdyh2ddj85arh64qh6c6khvrx8wvsp8z9wtzde0sahqj2qwz8rgzt803c0ceega53l4hks2mf5sfsv36qhesm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkdetews".to_string(); let message = "5field".to_string(); - let account = Account::Verify { address: address.to_string(), signature, message, raw: false }; + let account = Account::Verify { network: 0, address: address.to_string(), signature, message, raw: false }; let actual = account.parse(); assert!(actual.is_ok()); // test signature of 5u8 against the message 10u8 let signature = "sign1j7swjfnyujt2vme3ulu88wdyh2ddj85arh64qh6c6khvrx8wvsp8z9wtzde0sahqj2qwz8rgzt803c0ceega53l4hks2mf5sfsv36qhesm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkdetews".to_string(); let message = "10field".to_string(); - let account = Account::Verify { address: address.to_string(), signature, message, raw: false }; + let account = Account::Verify { network: 0, address: address.to_string(), signature, message, raw: false }; let actual = account.parse(); assert!(actual.is_err()); @@ -480,14 +530,14 @@ mod tests { let signature = "sign1j7swjfnyujt2vme3ulu88wdyh2ddj85arh64qh6c6khvrx8wvsp8z9wtzde0sahqj2qwz8rgzt803c0ceega53l4hks2mf5sfsv36qhesm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qkdetews".to_string(); let message = "5field".to_string(); let wrong_address = 
"aleo1uxl69laseuv3876ksh8k0nd7tvpgjt6ccrgccedpjk9qwyfensxst9ftg5".to_string(); - let account = Account::Verify { address: wrong_address, signature, message, raw: false }; + let account = Account::Verify { network: 0, address: wrong_address, signature, message, raw: false }; let actual = account.parse(); assert!(actual.is_err()); // test a valid signature of 10u8 let signature = "sign1t9v2t5tljk8pr5t6vkcqgkus0a3v69vryxmfrtwrwg0xtj7yv5qj2nz59e5zcyl50w23lhntxvt6vzeqfyu6dt56698zvfj2l6lz6q0esm5elrqqunzqzmac7kzutl6zk7mqht3c0m9kg4hklv7h2js0qmxavwnpuwyl4lzldl6prs4qeqy9wxyp8y44nnydg3h8sg6ue99qk8rh9kt".to_string(); let message = "10field".to_string(); - let account = Account::Verify { address: address.to_string(), signature, message, raw: false }; + let account = Account::Verify { network: 0, address: address.to_string(), signature, message, raw: false }; let actual = account.parse(); assert!(actual.is_ok()); } diff --git a/cli/src/commands/clean.rs b/cli/src/commands/clean.rs index 88fa7f0212..c1725e3fd2 100644 --- a/cli/src/commands/clean.rs +++ b/cli/src/commands/clean.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use snarkos_node::bft::helpers::proposal_cache_path; + use aleo_std::StorageMode; use anyhow::{bail, Result}; use clap::Parser; @@ -22,7 +24,7 @@ use std::path::PathBuf; #[derive(Debug, Parser)] pub struct Clean { /// Specify the network to remove from storage. - #[clap(default_value = "3", long = "network")] + #[clap(default_value = "0", long = "network")] pub network: u16, /// Enables development mode, specify the unique ID of the local node to clean. #[clap(long)] @@ -35,6 +37,13 @@ pub struct Clean { impl Clean { /// Cleans the snarkOS node storage. pub fn parse(self) -> Result { + // Remove the current proposal cache file, if it exists. + let proposal_cache_path = proposal_cache_path(self.network, self.dev); + if proposal_cache_path.exists() { + if let Err(err) = std::fs::remove_file(&proposal_cache_path) { + bail!("Failed to remove the current proposal cache file at {}: {err}", proposal_cache_path.display()); + } + } // Remove the specified ledger from storage. Self::remove_ledger(self.network, match self.path { Some(path) => StorageMode::Custom(path), diff --git a/cli/src/commands/developer/decrypt.rs b/cli/src/commands/developer/decrypt.rs index ba0e1358a7..9154337de9 100644 --- a/cli/src/commands/developer/decrypt.rs +++ b/cli/src/commands/developer/decrypt.rs @@ -12,9 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::CurrentNetwork; use snarkvm::{ - console::program::Ciphertext, + console::{ + network::{MainnetV0, Network, TestnetV0}, + program::Ciphertext, + }, prelude::{Record, ViewKey}, }; @@ -26,6 +28,9 @@ use zeroize::Zeroize; /// Decrypts a record ciphertext. #[derive(Debug, Parser, Zeroize)] pub struct Decrypt { + /// Specify the network of the ciphertext to decrypt. + #[clap(default_value = "0", long = "network")] + pub network: u16, /// The record ciphertext to decrypt. #[clap(short, long)] pub ciphertext: String, @@ -36,17 +41,21 @@ pub struct Decrypt { impl Decrypt { pub fn parse(self) -> Result { - // Decrypt the ciphertext. - Self::decrypt_ciphertext(&self.ciphertext, &self.view_key) + // Decrypt the ciphertext for the given network. 
+ match self.network { + MainnetV0::ID => Self::decrypt_ciphertext::(&self.ciphertext, &self.view_key), + TestnetV0::ID => Self::decrypt_ciphertext::(&self.ciphertext, &self.view_key), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } } /// Decrypts the ciphertext record with provided the view key. - fn decrypt_ciphertext(ciphertext: &str, view_key: &str) -> Result { + fn decrypt_ciphertext(ciphertext: &str, view_key: &str) -> Result { // Parse the ciphertext record. - let ciphertext_record = Record::>::from_str(ciphertext)?; + let ciphertext_record = Record::>::from_str(ciphertext)?; // Parse the account view key. - let view_key = ViewKey::::from_str(view_key)?; + let view_key = ViewKey::::from_str(view_key)?; match ciphertext_record.decrypt(&view_key) { Ok(plaintext_record) => Ok(plaintext_record.to_string()), @@ -76,6 +85,8 @@ mod tests { ViewKey, }; + type CurrentNetwork = MainnetV0; + const ITERATIONS: usize = 1000; fn construct_ciphertext( @@ -120,7 +131,7 @@ mod tests { // Decrypt the ciphertext. let expected_plaintext = ciphertext.decrypt(&view_key).unwrap(); - let decrypt = Decrypt { ciphertext: ciphertext.to_string(), view_key: view_key.to_string() }; + let decrypt = Decrypt { network: 0, ciphertext: ciphertext.to_string(), view_key: view_key.to_string() }; let plaintext = decrypt.parse().unwrap(); // Check that the decryption is correct. @@ -148,7 +159,8 @@ mod tests { let ciphertext = construct_ciphertext::(view_key, owner, &mut rng).unwrap(); // Enforce that the decryption fails. - let decrypt = Decrypt { ciphertext: ciphertext.to_string(), view_key: incorrect_view_key.to_string() }; + let decrypt = + Decrypt { network: 0, ciphertext: ciphertext.to_string(), view_key: incorrect_view_key.to_string() }; assert!(decrypt.parse().is_err()); } } diff --git a/cli/src/commands/developer/deploy.rs b/cli/src/commands/developer/deploy.rs index 7338fe909b..c084481859 100644 --- a/cli/src/commands/developer/deploy.rs +++ b/cli/src/commands/developer/deploy.rs @@ -12,9 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::{CurrentAleo, CurrentNetwork, Developer}; +use super::Developer; use snarkvm::{ - console::program::ProgramOwner, + circuit::{Aleo, AleoTestnetV0, AleoV0}, + console::{ + network::{MainnetV0, Network, TestnetV0}, + program::ProgramOwner, + }, prelude::{ block::Transaction, deployment_cost, @@ -37,7 +41,10 @@ use zeroize::Zeroize; #[derive(Debug, Parser)] pub struct Deploy { /// The name of the program to deploy. - program_id: ProgramID, + program_id: String, + /// Specify the network to create a deployment for. + #[clap(default_value = "0", long = "network")] + pub network: u16, /// A path to a directory containing a manifest file. Defaults to the current working directory. #[clap(long)] path: Option, @@ -82,19 +89,32 @@ impl Deploy { bail!("❌ Please specify one of the following actions: --broadcast, --dry-run, --store"); } + // Construct the deployment for the specified network. + match self.network { + MainnetV0::ID => self.construct_deployment::(), + TestnetV0::ID => self.construct_deployment::(), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } + } + + /// Construct and process the deployment transaction. + fn construct_deployment>(&self) -> Result { // Specify the query let query = Query::from(&self.query); // Retrieve the private key. let private_key = PrivateKey::from_str(&self.private_key)?; + // Retrieve the program ID. 
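Editor's note: every subcommand in this diff gains the same `--network` flag (default `0`, i.e. mainnet), and previously typed fields such as `Deploy`'s `program_id` become plain `String`s so clap never has to pick a network at argument-parsing time. A compilable reduction of that CLI shape (type names illustrative):

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Deploy {
    /// The name of the program to deploy.
    program_id: String,
    /// Specify the network to create a deployment for.
    #[clap(default_value = "0", long = "network")]
    network: u16,
}

fn main() {
    let cmd = Deploy::parse_from(["deploy", "hello.aleo", "--network", "1"]);
    // The typed ProgramID::<N>::from_str(&cmd.program_id) happens only after
    // the network dispatch; that step is omitted here.
    println!("deploying {} on network {}", cmd.program_id, cmd.network);
}
```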
+ let program_id = ProgramID::from_str(&self.program_id)?; + // Fetch the package from the directory. - let package = Developer::parse_package(self.program_id, &self.path)?; + let package = Developer::parse_package(program_id, &self.path)?; - println!("📦 Creating deployment transaction for '{}'...\n", &self.program_id.to_string().bold()); + println!("📦 Creating deployment transaction for '{}'...\n", &program_id.to_string().bold()); // Generate the deployment - let deployment = package.deploy::(None)?; + let deployment = package.deploy::(None)?; let deployment_id = deployment.to_deployment_id()?; // Generate the deployment transaction. @@ -107,13 +127,13 @@ impl Deploy { Some(path) => StorageMode::Custom(path.clone()), None => StorageMode::Production, }; - let store = ConsensusStore::>::open(storage_mode)?; + let store = ConsensusStore::>::open(storage_mode)?; // Initialize the VM. let vm = VM::from(store)?; // Compute the minimum deployment cost. - let (minimum_deployment_cost, (_, _)) = deployment_cost(&deployment)?; + let (minimum_deployment_cost, (_, _, _)) = deployment_cost(&deployment)?; // Prepare the fees. let fee = match &self.record { @@ -146,16 +166,10 @@ impl Deploy { // Create a new transaction. Transaction::from_deployment(owner, deployment, fee)? }; - println!("✅ Created deployment transaction for '{}'", self.program_id.to_string().bold()); + println!("✅ Created deployment transaction for '{}'", program_id.to_string().bold()); // Determine if the transaction should be broadcast, stored, or displayed to the user. - Developer::handle_transaction( - &self.broadcast, - self.dry_run, - &self.store, - transaction, - self.program_id.to_string(), - ) + Developer::handle_transaction(&self.broadcast, self.dry_run, &self.store, transaction, program_id.to_string()) } } @@ -183,7 +197,8 @@ mod tests { let cli = CLI::parse_from(arg_vec); if let Command::Developer(Developer::Deploy(deploy)) = cli.command { - assert_eq!(deploy.program_id, "hello.aleo".try_into().unwrap()); + assert_eq!(deploy.network, 0); + assert_eq!(deploy.program_id, "hello.aleo"); assert_eq!(deploy.private_key, "PRIVATE_KEY"); assert_eq!(deploy.query, "QUERY"); assert_eq!(deploy.priority_fee, 77); diff --git a/cli/src/commands/developer/execute.rs b/cli/src/commands/developer/execute.rs index 02688d153a..455f258e9c 100644 --- a/cli/src/commands/developer/execute.rs +++ b/cli/src/commands/developer/execute.rs @@ -12,18 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::{CurrentNetwork, Developer}; -use snarkvm::prelude::{ - query::Query, - store::{helpers::memory::ConsensusMemory, ConsensusStore}, - Address, - Identifier, - Locator, - PrivateKey, - Process, - ProgramID, - Value, - VM, +use super::Developer; +use snarkvm::{ + console::network::{MainnetV0, Network, TestnetV0}, + prelude::{ + query::Query, + store::{helpers::memory::ConsensusMemory, ConsensusStore}, + Address, + Identifier, + Locator, + PrivateKey, + Process, + ProgramID, + Value, + VM, + }, }; use aleo_std::StorageMode; @@ -37,11 +40,14 @@ use zeroize::Zeroize; #[derive(Debug, Parser)] pub struct Execute { /// The program identifier. - program_id: ProgramID, + program_id: String, /// The function name. - function: Identifier, + function: String, /// The function inputs. - inputs: Vec>, + inputs: Vec, + /// Specify the network to create an execution for. + #[clap(default_value = "0", long = "network")] + pub network: u16, /// The private key used to generate the execution. 
#[clap(short, long)] private_key: String, @@ -84,13 +90,32 @@ impl Execute { bail!("❌ Please specify one of the following actions: --broadcast, --dry-run, --store"); } + // Construct the execution for the specified network. + match self.network { + MainnetV0::ID => self.construct_execution::(), + TestnetV0::ID => self.construct_execution::(), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } + } + + /// Construct and process the execution transaction. + fn construct_execution(&self) -> Result { // Specify the query let query = Query::from(&self.query); // Retrieve the private key. let private_key = PrivateKey::from_str(&self.private_key)?; - let locator = Locator::::from_str(&format!("{}/{}", self.program_id, self.function))?; + // Retrieve the program ID. + let program_id = ProgramID::from_str(&self.program_id)?; + + // Retrieve the function. + let function = Identifier::from_str(&self.function)?; + + // Retrieve the inputs. + let inputs = self.inputs.iter().map(|input| Value::from_str(input)).collect::>>>()?; + + let locator = Locator::::from_str(&format!("{}/{}", program_id, function))?; println!("📦 Creating execution transaction for '{}'...\n", &locator.to_string().bold()); // Generate the execution transaction. @@ -103,13 +128,13 @@ impl Execute { Some(path) => StorageMode::Custom(path.clone()), None => StorageMode::Production, }; - let store = ConsensusStore::>::open(storage_mode)?; + let store = ConsensusStore::>::open(storage_mode)?; // Initialize the VM. let vm = VM::from(store)?; // Load the program and it's imports into the process. - load_program(&self.query, &mut vm.process().write(), &self.program_id)?; + load_program(&self.query, &mut vm.process().write(), &program_id)?; // Prepare the fee. let fee_record = match &self.record { @@ -119,15 +144,7 @@ impl Execute { let priority_fee = self.priority_fee.unwrap_or(0); // Create a new transaction. - vm.execute( - &private_key, - (self.program_id, self.function), - self.inputs.iter(), - fee_record, - priority_fee, - Some(query), - rng, - )? + vm.execute(&private_key, (program_id, function), inputs.iter(), fee_record, priority_fee, Some(query), rng)? }; // Check if the public balance is sufficient. @@ -165,11 +182,7 @@ impl Execute { } /// A helper function to recursively load the program and all of its imports into the process. -fn load_program( - endpoint: &str, - process: &mut Process, - program_id: &ProgramID, -) -> Result<()> { +fn load_program(endpoint: &str, process: &mut Process, program_id: &ProgramID) -> Result<()> { // Fetch the program. 
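Editor's note: the inputs in `construct_execution` above follow the same deferred-parsing rule; clap hands over raw strings, and the fallible per-input parses are collected into a single `Result` after the network dispatch. A minimal analogue, with `u32` standing in for snarkvm's `Value<N>`:

```rust
use anyhow::Result;
use std::str::FromStr;

// Parse every raw input; the first failure aborts the whole collection.
fn parse_inputs(raw: &[String]) -> Result<Vec<u32>> {
    raw.iter().map(|input| Ok(u32::from_str(input)?)).collect()
}

fn main() -> Result<()> {
    let inputs = parse_inputs(&["1".to_string(), "2".to_string()])?;
    assert_eq!(inputs, vec![1, 2]);
    Ok(())
}
```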
let program = Developer::fetch_program(program_id, endpoint)?; @@ -222,13 +235,14 @@ mod tests { let cli = CLI::parse_from(arg_vec); if let Command::Developer(Developer::Execute(execute)) = cli.command { + assert_eq!(execute.network, 0); assert_eq!(execute.private_key, "PRIVATE_KEY"); assert_eq!(execute.query, "QUERY"); assert_eq!(execute.priority_fee, Some(77)); assert_eq!(execute.record, Some("RECORD".into())); - assert_eq!(execute.program_id, "hello.aleo".try_into().unwrap()); - assert_eq!(execute.function, "hello".try_into().unwrap()); - assert_eq!(execute.inputs, vec!["1u32".try_into().unwrap(), "2u32".try_into().unwrap()]); + assert_eq!(execute.program_id, "hello.aleo".to_string()); + assert_eq!(execute.function, "hello".to_string()); + assert_eq!(execute.inputs, vec!["1u32".to_string(), "2u32".to_string()]); } else { panic!("Unexpected result of clap parsing!"); } diff --git a/cli/src/commands/developer/mod.rs b/cli/src/commands/developer/mod.rs index 5eac33aad0..fcdf6b2ea2 100644 --- a/cli/src/commands/developer/mod.rs +++ b/cli/src/commands/developer/mod.rs @@ -28,6 +28,7 @@ mod transfer_private; pub use transfer_private::*; use snarkvm::{ + console::network::Network, package::Package, prelude::{ block::Transaction, @@ -51,9 +52,6 @@ use clap::Parser; use colored::Colorize; use std::{path::PathBuf, str::FromStr}; -type CurrentAleo = snarkvm::circuit::AleoV0; -type CurrentNetwork = snarkvm::prelude::Testnet3; - /// Commands to deploy and execute transactions #[derive(Debug, Parser)] pub enum Developer { @@ -81,7 +79,7 @@ impl Developer { } /// Parse the package from the directory. - fn parse_package(program_id: ProgramID, path: &Option) -> Result> { + fn parse_package(program_id: ProgramID, path: &Option) -> Result> { // Instantiate a path to the directory containing the manifest file. let directory = match path { Some(path) => PathBuf::from_str(path)?, @@ -101,27 +99,31 @@ impl Developer { } /// Parses the record string. If the string is a ciphertext, then attempt to decrypt it. - fn parse_record( - private_key: &PrivateKey, - record: &str, - ) -> Result>> { + fn parse_record(private_key: &PrivateKey, record: &str) -> Result>> { match record.starts_with("record1") { true => { // Parse the ciphertext. - let ciphertext = Record::>::from_str(record)?; + let ciphertext = Record::>::from_str(record)?; // Derive the view key. let view_key = ViewKey::try_from(private_key)?; // Decrypt the ciphertext. ciphertext.decrypt(&view_key) } - false => Record::>::from_str(record), + false => Record::>::from_str(record), } } /// Fetch the program from the given endpoint. - fn fetch_program(program_id: &ProgramID, endpoint: &str) -> Result> { + fn fetch_program(program_id: &ProgramID, endpoint: &str) -> Result> { + // Get the network being used. + let network = match N::ID { + snarkvm::console::network::MainnetV0::ID => "mainnet", + snarkvm::console::network::TestnetV0::ID => "testnet", + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }; + // Send a request to the query node. - let response = ureq::get(&format!("{endpoint}/testnet3/program/{program_id}")).call(); + let response = ureq::get(&format!("{endpoint}/{network}/program/{program_id}")).call(); // Deserialize the program. match response { @@ -136,17 +138,24 @@ impl Developer { } /// Fetch the public balance in microcredits associated with the address from the given endpoint. 
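Editor's note: `fetch_program` above replaces the hard-coded `testnet3` REST path segment with one derived from `N::ID`, and the same mapping reappears in `get_public_balance` and the scan code below. Factored out as a sketch, with IDs 0/1 assumed per the diff's `--network` convention:

```rust
use anyhow::{bail, Result};

fn network_path(network_id: u16) -> Result<&'static str> {
    match network_id {
        0 => Ok("mainnet"),
        1 => Ok("testnet"),
        unknown_id => bail!("Unknown network ID ({unknown_id})"),
    }
}

// Mirrors the `{endpoint}/{network}/program/{program_id}` URL built above.
fn program_url(endpoint: &str, network_id: u16, program_id: &str) -> Result<String> {
    Ok(format!("{endpoint}/{}/program/{program_id}", network_path(network_id)?))
}

fn main() -> Result<()> {
    assert_eq!(
        program_url("http://localhost:3030", 0, "credits.aleo")?,
        "http://localhost:3030/mainnet/program/credits.aleo"
    );
    Ok(())
}
```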
- fn get_public_balance(address: &Address, endpoint: &str) -> Result { + fn get_public_balance(address: &Address, endpoint: &str) -> Result { // Initialize the program id and account identifier. - let credits = ProgramID::::from_str("credits.aleo")?; - let account_mapping = Identifier::::from_str("account")?; + let credits = ProgramID::::from_str("credits.aleo")?; + let account_mapping = Identifier::::from_str("account")?; + + // Get the network being used. + let network = match N::ID { + snarkvm::console::network::MainnetV0::ID => "mainnet", + snarkvm::console::network::TestnetV0::ID => "testnet", + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }; // Send a request to the query node. let response = - ureq::get(&format!("{endpoint}/testnet3/program/{credits}/mapping/{account_mapping}/{address}")).call(); + ureq::get(&format!("{endpoint}/{network}/program/{credits}/mapping/{account_mapping}/{address}")).call(); // Deserialize the balance. - let balance: Result>> = match response { + let balance: Result>> = match response { Ok(response) => response.into_json().map_err(|err| err.into()), Err(err) => match err { ureq::Error::Status(_status, response) => { @@ -158,7 +167,7 @@ impl Developer { // Return the balance in microcredits. match balance { - Ok(Some(Value::Plaintext(Plaintext::Literal(Literal::::U64(amount), _)))) => Ok(*amount), + Ok(Some(Value::Plaintext(Plaintext::Literal(Literal::::U64(amount), _)))) => Ok(*amount), Ok(None) => Ok(0), Ok(Some(..)) => bail!("Failed to deserialize balance for {address}"), Err(err) => bail!("Failed to fetch balance for {address}: {err}"), @@ -166,11 +175,11 @@ impl Developer { } /// Determine if the transaction should be broadcast or displayed to user. - fn handle_transaction( + fn handle_transaction( broadcast: &Option, dry_run: bool, store: &Option, - transaction: Transaction, + transaction: Transaction, operation: String, ) -> Result { // Get the transaction id. diff --git a/cli/src/commands/developer/scan.rs b/cli/src/commands/developer/scan.rs index 69643126d4..891295a092 100644 --- a/cli/src/commands/developer/scan.rs +++ b/cli/src/commands/developer/scan.rs @@ -14,8 +14,10 @@ #![allow(clippy::type_complexity)] -use super::CurrentNetwork; -use snarkvm::prelude::{block::Block, Ciphertext, Field, FromBytes, Network, Plaintext, PrivateKey, Record, ViewKey}; +use snarkvm::{ + console::network::{MainnetV0, Network, TestnetV0}, + prelude::{block::Block, Ciphertext, Field, FromBytes, Plaintext, PrivateKey, Record, ViewKey}, +}; use anyhow::{bail, ensure, Result}; use clap::Parser; @@ -28,11 +30,16 @@ use std::{ use zeroize::Zeroize; const MAX_BLOCK_RANGE: u32 = 50; +// TODO (raychu86): This should be configurable based on network. const CDN_ENDPOINT: &str = "https://s3.us-west-1.amazonaws.com/testnet3.blocks/phase3"; /// Scan the snarkOS node for records. #[derive(Debug, Parser, Zeroize)] pub struct Scan { + /// Specify the network to scan. + #[clap(default_value = "0", long = "network")] + pub network: u16, + /// An optional private key scan for unspent records. #[clap(short, long)] private_key: Option, @@ -60,14 +67,24 @@ pub struct Scan { impl Scan { pub fn parse(self) -> Result { + // Scan for records on the given network. + match self.network { + MainnetV0::ID => self.scan_records::(), + TestnetV0::ID => self.scan_records::(), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } + } + + /// Scan the network for records. + fn scan_records(&self) -> Result { // Derive the view key and optional private key. 
- let (private_key, view_key) = self.parse_account()?; + let (private_key, view_key) = self.parse_account::()?; // Find the start and end height to scan. let (start_height, end_height) = self.parse_block_range()?; // Fetch the records from the network. - let records = Self::fetch_records(private_key, &view_key, &self.endpoint, start_height, end_height)?; + let records = Self::fetch_records::(private_key, &view_key, &self.endpoint, start_height, end_height)?; // Output the decrypted records associated with the view key. if records.is_empty() { @@ -114,6 +131,13 @@ impl Scan { /// Returns the `start` and `end` blocks to scan. fn parse_block_range(&self) -> Result<(u32, u32)> { + // Get the network name. + let network = match self.network { + MainnetV0::ID => "mainnet", + TestnetV0::ID => "testnet", + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }; + match (self.start, self.end, self.last) { (Some(start), Some(end), None) => { ensure!(end > start, "The given scan range is invalid (start = {start}, end = {end})"); @@ -122,7 +146,7 @@ impl Scan { } (Some(start), None, None) => { // Request the latest block height from the endpoint. - let endpoint = format!("{}/testnet3/latest/height", self.endpoint); + let endpoint = format!("{}/{network}/latest/height", self.endpoint); let latest_height = u32::from_str(&ureq::get(&endpoint).call()?.into_string()?)?; // Print a warning message if the user is attempting to scan the whole chain. @@ -135,7 +159,7 @@ impl Scan { (None, Some(end), None) => Ok((0, end)), (None, None, Some(last)) => { // Request the latest block height from the endpoint. - let endpoint = format!("{}/testnet3/latest/height", self.endpoint); + let endpoint = format!("{}/{network}/latest/height", self.endpoint); let latest_height = u32::from_str(&ureq::get(&endpoint).call()?.into_string()?)?; Ok((latest_height.saturating_sub(last), latest_height)) @@ -146,18 +170,25 @@ impl Scan { } /// Fetch owned ciphertext records from the endpoint. - fn fetch_records( - private_key: Option>, - view_key: &ViewKey, + fn fetch_records( + private_key: Option>, + view_key: &ViewKey, endpoint: &str, start_height: u32, end_height: u32, - ) -> Result>>> { + ) -> Result>>> { // Check the bounds of the request. if start_height > end_height { bail!("Invalid block range"); } + // Get the network name. + let network = match N::ID { + MainnetV0::ID => "mainnet", + TestnetV0::ID => "testnet", + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }; + // Derive the x-coordinate of the address corresponding to the given view key. let address_x_coordinate = view_key.to_address().to_x_coordinate(); @@ -172,10 +203,9 @@ impl Scan { stdout().flush()?; // Fetch the genesis block from the endpoint. - let genesis_block: Block = - ureq::get(&format!("{endpoint}/testnet3/block/0")).call()?.into_json()?; + let genesis_block: Block = ureq::get(&format!("{endpoint}/{network}/block/0")).call()?.into_json()?; // Determine if the endpoint is on a development network. - let is_development_network = genesis_block != Block::from_bytes_le(CurrentNetwork::genesis_bytes())?; + let is_development_network = genesis_block != Block::from_bytes_le(N::genesis_bytes())?; // Determine the request start height. let mut request_start = match is_development_network { @@ -210,9 +240,9 @@ impl Scan { let request_end = request_start.saturating_add(num_blocks_to_request); // Establish the endpoint. 
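Editor's note: `fetch_records` pages through the chain in batches of at most `MAX_BLOCK_RANGE` (50) blocks per REST request, as assembled around the endpoint above. The windowing logic in isolation:

```rust
const MAX_BLOCK_RANGE: u32 = 50;

// Split [start, end) into half-open windows of at most MAX_BLOCK_RANGE blocks.
fn request_windows(start: u32, end: u32) -> Vec<(u32, u32)> {
    let mut windows = Vec::new();
    let mut cursor = start;
    while cursor < end {
        let window_end = cursor.saturating_add(MAX_BLOCK_RANGE).min(end);
        windows.push((cursor, window_end));
        cursor = window_end;
    }
    windows
}

fn main() {
    // 120 blocks split into 50 + 50 + 20.
    assert_eq!(request_windows(0, 120), vec![(0, 50), (50, 100), (100, 120)]);
}
```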
- let blocks_endpoint = format!("{endpoint}/testnet3/blocks?start={request_start}&end={request_end}"); + let blocks_endpoint = format!("{endpoint}/{network}/blocks?start={request_start}&end={request_end}"); // Fetch blocks - let blocks: Vec> = ureq::get(&blocks_endpoint).call()?.into_json()?; + let blocks: Vec> = ureq::get(&blocks_endpoint).call()?.into_json()?; // Scan the blocks for owned records. for block in &blocks { @@ -232,15 +262,15 @@ impl Scan { /// Scan the blocks from the CDN. #[allow(clippy::too_many_arguments)] - fn scan_from_cdn( + fn scan_from_cdn( start_height: u32, end_height: u32, cdn: String, endpoint: String, - private_key: Option>, - view_key: ViewKey, - address_x_coordinate: Field, - records: Arc>>>>, + private_key: Option>, + view_key: ViewKey, + address_x_coordinate: Field, + records: Arc>>>>, ) -> Result<()> { // Calculate the number of blocks to scan. let total_blocks = end_height.saturating_sub(start_height); @@ -294,13 +324,13 @@ impl Scan { } /// Scan a block for owned records. - fn scan_block( - block: &Block, + fn scan_block( + block: &Block, endpoint: &str, - private_key: Option>, - view_key: &ViewKey, - address_x_coordinate: &Field, - records: Arc>>>>, + private_key: Option>, + view_key: &ViewKey, + address_x_coordinate: &Field, + records: Arc>>>>, ) -> Result<()> { for (commitment, ciphertext_record) in block.records() { // Check if the record is owned by the given view key. @@ -318,21 +348,27 @@ impl Scan { } /// Decrypts the ciphertext record and filters spend record if a private key was provided. - fn decrypt_record( - private_key: Option>, - view_key: &ViewKey, + fn decrypt_record( + private_key: Option>, + view_key: &ViewKey, endpoint: &str, - commitment: Field, - ciphertext_record: &Record>, - ) -> Result>>> { + commitment: Field, + ciphertext_record: &Record>, + ) -> Result>>> { // Check if a private key was provided. if let Some(private_key) = private_key { // Compute the serial number. - let serial_number = - Record::>::serial_number(private_key, commitment)?; + let serial_number = Record::>::serial_number(private_key, commitment)?; + + // Get the network name. + let network = match N::ID { + MainnetV0::ID => "mainnet", + TestnetV0::ID => "testnet", + unknown_id => bail!("Unknown network ID ({unknown_id})"), + }; // Establish the endpoint. - let endpoint = format!("{endpoint}/testnet3/find/transitionID/{serial_number}"); + let endpoint = format!("{endpoint}/{network}/find/transitionID/{serial_number}"); // Check if the record is spent. match ureq::get(&endpoint).call() { @@ -357,9 +393,9 @@ impl Scan { #[cfg(test)] mod tests { use super::*; - use snarkvm::prelude::{TestRng, Testnet3}; + use snarkvm::prelude::{MainnetV0, TestRng}; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; #[test] fn test_parse_account() { diff --git a/cli/src/commands/developer/transfer_private.rs b/cli/src/commands/developer/transfer_private.rs index 1a9d45620f..0d84ea5930 100644 --- a/cli/src/commands/developer/transfer_private.rs +++ b/cli/src/commands/developer/transfer_private.rs @@ -12,15 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
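Editor's note: a reduced form of the spent check in `decrypt_record` above. A record is spent iff its serial number resolves to a transition ID on-chain, so a record is kept only when the lookup finds nothing; `find_transition` stands in for the ureq call to `{endpoint}/{network}/find/transitionID/{serial_number}`:

```rust
use anyhow::Result;

// Returns true when the record is unspent and should be reported.
fn keep_record(
    find_transition: impl Fn(&str) -> Result<Option<String>>,
    serial_number: &str,
) -> Result<bool> {
    Ok(find_transition(serial_number)?.is_none())
}

fn main() -> Result<()> {
    let unspent = keep_record(|_| Ok(None), "serial0")?;
    assert!(unspent);
    Ok(())
}
```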
-use super::{CurrentNetwork, Developer}; -use snarkvm::prelude::{ - query::Query, - store::{helpers::memory::ConsensusMemory, ConsensusStore}, - Address, - Locator, - PrivateKey, - Value, - VM, +use super::Developer; +use snarkvm::{ + console::network::{MainnetV0, Network, TestnetV0}, + prelude::{ + query::Query, + store::{helpers::memory::ConsensusMemory, ConsensusStore}, + Address, + Locator, + PrivateKey, + Value, + VM, + }, }; use aleo_std::StorageMode; @@ -32,12 +35,15 @@ use zeroize::Zeroize; /// Executes the `transfer_private` function in the `credits.aleo` program. #[derive(Debug, Parser)] pub struct TransferPrivate { + /// Specify the network to create a `transfer_private` for. + #[clap(default_value = "0", long = "network")] + pub network: u16, /// The input record used to craft the transfer. #[clap(long)] input_record: String, /// The recipient address. #[clap(long)] - recipient: Address, + recipient: String, /// The number of microcredits to transfer. #[clap(long)] amount: u64, @@ -83,13 +89,26 @@ impl TransferPrivate { bail!("❌ Please specify one of the following actions: --broadcast, --dry-run, --store"); } + // Construct the transfer for the specified network. + match self.network { + MainnetV0::ID => self.construct_transfer_private::(), + TestnetV0::ID => self.construct_transfer_private::(), + unknown_id => bail!("Unknown network ID ({unknown_id})"), + } + } + + /// Construct and process the `transfer_private` transaction. + fn construct_transfer_private(&self) -> Result { // Specify the query let query = Query::from(&self.query); + // Retrieve the recipient. + let recipient = Address::::from_str(&self.recipient)?; + // Retrieve the private key. let private_key = PrivateKey::from_str(&self.private_key)?; - println!("📦 Creating private transfer of {} microcredits to {}...\n", self.amount, self.recipient); + println!("📦 Creating private transfer of {} microcredits to {}...\n", self.amount, recipient); // Generate the transfer_private transaction. let transaction = { @@ -101,7 +120,7 @@ impl TransferPrivate { Some(path) => StorageMode::Custom(path.clone()), None => StorageMode::Production, }; - let store = ConsensusStore::>::open(storage_mode)?; + let store = ConsensusStore::>::open(storage_mode)?; // Initialize the VM. let vm = VM::from(store)?; @@ -114,7 +133,7 @@ impl TransferPrivate { let input_record = Developer::parse_record(&private_key, &self.input_record)?; let inputs = vec![ Value::Record(input_record), - Value::from_str(&format!("{}", self.recipient))?, + Value::from_str(&format!("{}", recipient))?, Value::from_str(&format!("{}u64", self.amount))?, ]; @@ -129,8 +148,8 @@ impl TransferPrivate { rng, )? }; - let locator = Locator::::from_str("credits.aleo/transfer_private")?; - println!("✅ Created private transfer of {} microcredits to {}\n", &self.amount, self.recipient); + let locator = Locator::::from_str("credits.aleo/transfer_private")?; + println!("✅ Created private transfer of {} microcredits to {}\n", &self.amount, recipient); // Determine if the transaction should be broadcast, stored, or displayed to the user. 
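Editor's note: `construct_transfer_private` above passes exactly three arguments to `credits.aleo/transfer_private`: the input record, the recipient address, and the amount rendered as an Aleo `u64` literal. As a plain-string sketch of that layout:

```rust
fn transfer_private_inputs(record: &str, recipient: &str, microcredits: u64) -> Vec<String> {
    vec![
        record.to_string(),              // Value::Record(input_record)
        recipient.to_string(),           // recipient address
        format!("{microcredits}u64"),    // amount as an Aleo u64 literal
    ]
}

fn main() {
    let inputs = transfer_private_inputs("record1...", "aleo1...", 1_000_000);
    assert_eq!(inputs[2], "1000000u64");
}
```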
Developer::handle_transaction(&self.broadcast, self.dry_run, &self.store, transaction, locator.to_string()) diff --git a/cli/src/commands/start.rs b/cli/src/commands/start.rs index fdaec4ed58..2cfd66d310 100644 --- a/cli/src/commands/start.rs +++ b/cli/src/commands/start.rs @@ -19,11 +19,11 @@ use snarkvm::{ console::{ account::{Address, PrivateKey}, algorithms::Hash, - network::{Network, Testnet3}, + network::{MainnetV0, Network, TestnetV0}, }, ledger::{ block::Block, - committee::{Committee, MIN_VALIDATOR_STAKE}, + committee::{Committee, MIN_DELEGATOR_STAKE, MIN_VALIDATOR_STAKE}, store::{helpers::memory::ConsensusMemory, ConsensusStore}, }, prelude::{FromBytes, ToBits, ToBytes}, @@ -36,9 +36,15 @@ use anyhow::{bail, ensure, Result}; use clap::Parser; use colored::Colorize; use core::str::FromStr; +use indexmap::IndexMap; use rand::SeedableRng; use rand_chacha::ChaChaRng; -use std::{net::SocketAddr, path::PathBuf}; +use serde::{Deserialize, Serialize}; +use std::{ + net::SocketAddr, + path::PathBuf, + sync::{atomic::AtomicBool, Arc}, +}; use tokio::runtime::{self, Runtime}; /// The recommended minimum number of 'open files' limit for a validator. @@ -51,11 +57,23 @@ const DEVELOPMENT_MODE_RNG_SEED: u64 = 1234567890u64; /// The development mode number of genesis committee members. const DEVELOPMENT_MODE_NUM_GENESIS_COMMITTEE_MEMBERS: u16 = 4; +/// A mapping of `staker_address` to `(validator_address, withdrawal_address, amount)`. +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct BondedBalances(IndexMap); + +impl FromStr for BondedBalances { + type Err = serde_json::Error; + + fn from_str(s: &str) -> Result { + serde_json::from_str(s) + } +} + /// Starts the snarkOS node. #[derive(Clone, Debug, Parser)] pub struct Start { /// Specify the network ID of this node - #[clap(default_value = "3", long = "network")] + #[clap(default_value = "0", long = "network")] pub network: u16, /// Specify this node as a validator @@ -76,8 +94,8 @@ pub struct Start { pub private_key_file: Option, /// Specify the IP address and port for the node server - #[clap(default_value = "0.0.0.0:4133", long = "node")] - pub node: SocketAddr, + #[clap(long = "node")] + pub node: Option, /// Specify the IP address and port for the BFT #[clap(long = "bft")] pub bft: Option, @@ -87,9 +105,12 @@ pub struct Start { /// Specify the IP address and port of the validator(s) to connect to #[clap(default_value = "", long = "validators")] pub validators: String, + /// If the flag is set, a node will allow untrusted peers to connect + #[clap(long = "allow-external-peers")] + pub allow_external_peers: bool, /// Specify the IP address and port for the REST server - #[clap(default_value = "0.0.0.0:3033", long = "rest")] + #[clap(default_value = "0.0.0.0:3030", long = "rest")] pub rest: SocketAddr, /// Specify the requests per second (RPS) rate limit per IP for the REST server #[clap(default_value = "10", long = "rest-rps")] @@ -111,6 +132,9 @@ pub struct Start { #[clap(default_value = "false", long = "metrics")] pub metrics: bool, + /// Specify the path to a directory containing the storage database for the ledger + #[clap(long = "storage")] + pub storage: Option, /// Enables the node to prefetch initial blocks from a CDN #[clap(default_value = "https://s3.us-west-1.amazonaws.com/testnet3.blocks/phase3", long = "cdn")] pub cdn: String, @@ -124,25 +148,41 @@ pub struct Start { /// If development mode is enabled, specify the number of genesis validators (default: 4) #[clap(long)] pub dev_num_validators: Option, - 
/// Specify the path to a directory containing the ledger
-    #[clap(long = "storage_path")]
-    pub storage_path: Option<PathBuf>,
+    /// If development mode is enabled, specify whether node 0 should generate traffic to drive the network
+    #[clap(default_value = "false", long = "no-dev-txs")]
+    pub no_dev_txs: bool,
+    /// If development mode is enabled, specify the custom bonded balances as a JSON object (default: None)
+    #[clap(long)]
+    pub dev_bonded_balances: Option<BondedBalances>,
 }

 impl Start {
     /// Starts the snarkOS node.
     pub fn parse(self) -> Result<String> {
+        // Prepare the shutdown flag.
+        let shutdown: Arc<AtomicBool> = Default::default();
+
         // Initialize the logger.
-        let log_receiver = crate::helpers::initialize_logger(self.verbosity, self.nodisplay, self.logfile.clone());
+        let log_receiver =
+            crate::helpers::initialize_logger(self.verbosity, self.nodisplay, self.logfile.clone(), shutdown.clone());
         // Initialize the runtime.
         Self::runtime().block_on(async move {
             // Clone the configurations.
             let mut cli = self.clone();
             // Parse the network.
             match cli.network {
-                3 => {
+                MainnetV0::ID => {
                     // Parse the node from the configurations.
-                    let node = cli.parse_node::<Testnet3>().await.expect("Failed to parse the node");
+                    let node = cli.parse_node::<MainnetV0>(shutdown.clone()).await.expect("Failed to parse the node");
+                    // If the display is enabled, render the display.
+                    if !cli.nodisplay {
+                        // Initialize the display.
+                        Display::start(node, log_receiver).expect("Failed to initialize the display");
+                    }
+                }
+                TestnetV0::ID => {
+                    // Parse the node from the configurations.
+                    let node = cli.parse_node::<TestnetV0>(shutdown.clone()).await.expect("Failed to parse the node");
                     // If the display is enabled, render the display.
                     if !cli.nodisplay {
                         // Initialize the display.
@@ -248,7 +288,7 @@ impl Start {
                 let _ = PrivateKey::<N>::new(&mut rng)?;
             }
             let private_key = PrivateKey::<N>::new(&mut rng)?;
-            println!("🔑 Your development private key for node {dev} is {}\n", private_key.to_string().bold());
+            println!("🔑 Your development private key for node {dev} is {}.\n", private_key.to_string().bold());
             private_key
         })
     }
@@ -283,11 +323,15 @@ impl Start {
             }
         }
         // Set the node IP to `4130 + dev`.
-        self.node = SocketAddr::from_str(&format!("0.0.0.0:{}", 4130 + dev))?;
+        //
+        // Note: the `node` flag is an option to detect remote devnet testing.
+        if self.node.is_none() {
+            self.node = Some(SocketAddr::from_str(&format!("0.0.0.0:{}", 4130 + dev))?);
+        }
         // If the `norest` flag is not set, and the `bft` flag was not overridden,
         // then set the REST IP to `3030 + dev`.
         //
-        // Note: the reason the `bft` flag is an option is to detect for remote devnet testing.
+        // Note: the `bft` flag is an option to detect remote devnet testing.
         if !self.norest && self.bft.is_none() {
             self.rest = SocketAddr::from_str(&format!("0.0.0.0:{}", 3030 + dev))?;
         }
@@ -314,24 +358,82 @@ impl Start {
             // Initialize the development private keys.
             let development_private_keys =
                (0..num_committee_members).map(|_| PrivateKey::<N>::new(&mut rng)).collect::<Result<Vec<_>>>()?;
-
-            // Construct the committee.
-            let committee = {
-                // Calculate the committee stake per member.
-                let stake_per_member =
-                    N::STARTING_SUPPLY.saturating_div(2).saturating_div(num_committee_members as u64);
-                ensure!(stake_per_member >= MIN_VALIDATOR_STAKE, "Committee stake per member is too low");
-
-                // Construct the committee members and distribute stakes evenly among committee members.
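The development port convention above is purely additive from two base ports. A small sketch of the scheme (the helper is illustrative, not part of the PR):

    use std::net::SocketAddr;
    use std::str::FromStr;

    /// Returns the default (P2P, REST) socket addresses for development node index `dev`.
    fn dev_addresses(dev: u16) -> (SocketAddr, SocketAddr) {
        let node = SocketAddr::from_str(&format!("0.0.0.0:{}", 4130 + dev)).unwrap();
        let rest = SocketAddr::from_str(&format!("0.0.0.0:{}", 3030 + dev)).unwrap();
        (node, rest)
    }

A four-validator devnet therefore occupies 4130..=4133 for P2P and 3030..=3033 for REST, which is exactly what the updated tests later in this file assert.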
- let members = development_private_keys - .iter() - .map(|private_key| Ok((Address::try_from(private_key)?, (stake_per_member, true)))) - .collect::>>()?; - - // Output the committee. - Committee::::new(0u64, members)? + // Initialize the development addresses. + let development_addresses = + development_private_keys.iter().map(Address::::try_from).collect::>>()?; + + // Construct the committee based on the state of the bonded balances. + let (committee, bonded_balances) = match &self.dev_bonded_balances { + Some(bonded_balances) => { + // Parse the bonded balances. + let bonded_balances = bonded_balances + .0 + .iter() + .map(|(staker_address, (validator_address, withdrawal_address, amount))| { + let staker_addr = Address::::from_str(staker_address)?; + let validator_addr = Address::::from_str(validator_address)?; + let withdrawal_addr = Address::::from_str(withdrawal_address)?; + Ok((staker_addr, (validator_addr, withdrawal_addr, *amount))) + }) + .collect::>>()?; + + // Construct the committee members. + let mut members = IndexMap::new(); + for (staker_address, (validator_address, _, amount)) in bonded_balances.iter() { + // Ensure that the staking amount is sufficient. + match staker_address == validator_address { + true => ensure!(amount >= &MIN_VALIDATOR_STAKE, "Validator stake is too low"), + false => ensure!(amount >= &MIN_DELEGATOR_STAKE, "Delegator stake is too low"), + } + + // Ensure that the validator address is included in the list of development addresses. + ensure!( + development_addresses.contains(validator_address), + "Validator address {validator_address} is not included in the list of development addresses" + ); + + // Add or update the validator entry in the list of members. + members + .entry(*validator_address) + .and_modify(|(stake, _)| *stake += amount) + .or_insert((*amount, true)); + } + // Construct the committee. + let committee = Committee::::new(0u64, members)?; + (committee, bonded_balances) + } + None => { + // Calculate the committee stake per member. + let stake_per_member = + N::STARTING_SUPPLY.saturating_div(2).saturating_div(num_committee_members as u64); + ensure!(stake_per_member >= MIN_VALIDATOR_STAKE, "Committee stake per member is too low"); + + // Construct the committee members and distribute stakes evenly among committee members. + let members = development_addresses + .iter() + .map(|address| (*address, (stake_per_member, true))) + .collect::>(); + + // Construct the bonded balances. + // Note: The withdrawal address is set to the staker address. + let bonded_balances = members + .iter() + .map(|(address, (stake, _))| (*address, (*address, *address, *stake))) + .collect::>(); + // Construct the committee. + let committee = Committee::::new(0u64, members)?; + + (committee, bonded_balances) + } }; + // Ensure that the number of committee members is correct. + ensure!( + committee.members().len() == num_committee_members as usize, + "Number of committee members {} does not match the expected number of members {num_committee_members}", + committee.members().len() + ); + // Calculate the public balance per validator. let remaining_balance = N::STARTING_SUPPLY.saturating_sub(committee.total_stake()); let public_balance_per_validator = remaining_balance.saturating_div(num_committee_members as u64); @@ -357,7 +459,7 @@ impl Start { } // Construct the genesis block. 
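Since `--dev-bonded-balances` is passed as raw JSON, the expected shape is easiest to see by example: each entry maps a staker address to `[validator_address, withdrawal_address, amount]`, with the amount in microcredits. A minimal sketch, assuming the `BondedBalances` newtype defined earlier in this file is in scope (the addresses are truncated placeholders):

    use core::str::FromStr;

    fn parse_example() -> Result<BondedBalances, serde_json::Error> {
        // One self-bonded validator and one delegator bonding to that validator.
        let json = r#"{
            "aleo1validator...": ["aleo1validator...", "aleo1withdraw...", 10000000000000],
            "aleo1delegator...": ["aleo1validator...", "aleo1delegator...", 10000000000]
        }"#;
        BondedBalances::from_str(json)
    }

Self-bonded stakes must clear `MIN_VALIDATOR_STAKE` and delegated stakes `MIN_DELEGATOR_STAKE`, and all stakes bonded to the same validator are summed into a single committee entry, which is what the `and_modify`/`or_insert` fold above implements.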
- load_or_compute_genesis(development_private_keys[0], committee, public_balances, &mut rng) + load_or_compute_genesis(development_private_keys[0], committee, public_balances, bonded_balances, &mut rng) } else { // If the `dev_num_validators` flag is set, inform the user that it is ignored. if self.dev_num_validators.is_some() { @@ -381,7 +483,7 @@ impl Start { /// Returns the node type corresponding to the given configurations. #[rustfmt::skip] - async fn parse_node(&mut self) -> Result> { + async fn parse_node(&mut self, shutdown: Arc) -> Result> { // Print the welcome. println!("{}", crate::helpers::welcome_message()); @@ -402,6 +504,16 @@ impl Start { // Parse the node type. let node_type = self.parse_node_type(); + // Parse the node IP. + let node_ip = match self.node { + Some(node_ip) => node_ip, + None => SocketAddr::from_str("0.0.0.0:4130").unwrap(), + }; + // Parse the BFT IP. + let bft_ip = match self.dev.is_some() { + true => self.bft, + false => None + }; // Parse the REST IP. let rest_ip = match self.norest { true => None, @@ -411,14 +523,13 @@ impl Start { // If the display is not enabled, render the welcome message. if self.nodisplay { // Print the Aleo address. - println!("🪪 Your Aleo address is {}.\n", account.address().to_string().bold()); + println!("👛 Your Aleo address is {}.\n", account.address().to_string().bold()); // Print the node type and network. println!( - "🧭 Starting {} on {} {} at {}.\n", + "🧭 Starting {} on {} at {}.\n", node_type.description().bold(), N::NAME.bold(), - "Phase 3".bold(), - self.node.to_string().bold() + node_ip.to_string().bold() ); // If the node is running a REST server, print the REST IP and JWT. @@ -447,17 +558,28 @@ impl Start { } // Initialize the storage mode. - let storage_mode = match &self.storage_path { + let storage_mode = match &self.storage { Some(path) => StorageMode::Custom(path.clone()), None => StorageMode::from(self.dev), }; + // Determine whether to generate background transactions in dev mode. + let dev_txs = match self.dev { + Some(_) => !self.no_dev_txs, + None => { + // If the `no_dev_txs` flag is set, inform the user that it is ignored. + if self.no_dev_txs { + eprintln!("The '--no-dev-txs' flag is ignored because '--dev' is not set"); + } + false + } + }; + // Initialize the node. - let bft_ip = if self.dev.is_some() { self.bft } else { None }; match node_type { - NodeType::Validator => Node::new_validator(self.node, bft_ip, rest_ip, self.rest_rps, account, &trusted_peers, &trusted_validators, genesis, cdn, storage_mode).await, - NodeType::Prover => Node::new_prover(self.node, account, &trusted_peers, genesis, storage_mode).await, - NodeType::Client => Node::new_client(self.node, rest_ip, self.rest_rps, account, &trusted_peers, genesis, cdn, storage_mode).await, + NodeType::Validator => Node::new_validator(node_ip, bft_ip, rest_ip, self.rest_rps, account, &trusted_peers, &trusted_validators, genesis, cdn, storage_mode, self.allow_external_peers, dev_txs, shutdown.clone()).await, + NodeType::Prover => Node::new_prover(node_ip, account, &trusted_peers, genesis, storage_mode, shutdown.clone()).await, + NodeType::Client => Node::new_client(node_ip, rest_ip, self.rest_rps, account, &trusted_peers, genesis, cdn, storage_mode, shutdown).await, } } @@ -465,23 +587,12 @@ impl Start { fn runtime() -> Runtime { // Retrieve the number of cores. let num_cores = num_cpus::get(); - // Determine the number of main cores. 
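Storage selection now follows a clear precedence: an explicit `--storage` path wins, otherwise `--dev` selects a per-index development database, otherwise production storage. A sketch of that precedence, assuming `aleo_std::StorageMode` behaves as used above (`Custom` for explicit paths, and a `From<Option<u16>>` impl mapping a dev index to development storage):

    use aleo_std::StorageMode;
    use std::path::PathBuf;

    /// Resolves the ledger storage mode from the `--storage` and `--dev` flags.
    fn resolve_storage_mode(storage: Option<PathBuf>, dev: Option<u16>) -> StorageMode {
        match storage {
            Some(path) => StorageMode::Custom(path),
            // Assumed: `Some(id)` selects development storage for node `id`, `None` selects production.
            None => StorageMode::from(dev),
        }
    }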
- let main_cores = match num_cores { - // Insufficient - 0..=3 => { - eprintln!("The number of cores is insufficient, at least 4 are needed."); - std::process::exit(1); - } - // Efficiency mode - 4..=8 => 2, - // Standard mode - 9..=16 => 8, - // Performance mode - _ => 16, - }; + // Initialize the number of tokio worker threads, max tokio blocking threads, and rayon cores. + // Note: We intentionally set the number of tokio worker threads and number of rayon cores to be + // more than the number of physical cores, because the node is expected to be I/O-bound. let (num_tokio_worker_threads, max_tokio_blocking_threads, num_rayon_cores_global) = - { (num_cores.min(main_cores), 512, num_cores.saturating_sub(main_cores).max(1)) }; + (2 * num_cores, 512, num_cores); // Initialize the parallelization parameters. rayon::ThreadPoolBuilder::new() @@ -526,29 +637,39 @@ fn load_or_compute_genesis( genesis_private_key: PrivateKey, committee: Committee, public_balances: indexmap::IndexMap, u64>, + bonded_balances: indexmap::IndexMap, (Address, Address, u64)>, rng: &mut ChaChaRng, ) -> Result> { // Construct the preimage. let mut preimage = Vec::new(); + // Input the network ID. + preimage.extend(&N::ID.to_le_bytes()); + // Input the genesis private key, committee, and public balances. preimage.extend(genesis_private_key.to_bytes_le()?); preimage.extend(committee.to_bytes_le()?); preimage.extend(&to_bytes_le![public_balances.iter().collect::>()]?); + preimage.extend(&to_bytes_le![ + bonded_balances + .iter() + .flat_map(|(staker, (validator, withdrawal, amount))| to_bytes_le![staker, validator, withdrawal, amount]) + .collect::>() + ]?); // Input the parameters' metadata. - preimage.extend(snarkvm::parameters::testnet3::BondPublicVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::UnbondPublicVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::UnbondDelegatorAsValidatorVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::ClaimUnbondPublicVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::SetValidatorStateVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::TransferPrivateVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::TransferPublicVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::TransferPrivateToPublicVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::TransferPublicToPrivateVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::FeePrivateVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::FeePublicVerifier::METADATA.as_bytes()); - preimage.extend(snarkvm::parameters::testnet3::InclusionVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::BondPublicVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::UnbondPublicVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::UnbondDelegatorAsValidatorVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::ClaimUnbondPublicVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::SetValidatorStateVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::TransferPrivateVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::TransferPublicVerifier::METADATA.as_bytes()); + 
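The replacement sizing deliberately oversubscribes the async executor: with `num_cores` physical cores, tokio receives `2 * num_cores` worker threads while the global rayon pool keeps `num_cores`, on the stated reasoning that the node is I/O-bound. A self-contained sketch of the same construction:

    use tokio::runtime::{Builder, Runtime};

    fn build_runtime() -> Runtime {
        let num_cores = num_cpus::get();
        let (num_workers, max_blocking, num_rayon_cores) = (2 * num_cores, 512, num_cores);

        // Size the global rayon pool for CPU-bound work.
        rayon::ThreadPoolBuilder::new()
            .num_threads(num_rayon_cores)
            .build_global()
            .expect("Failed to build the global rayon pool");

        // Oversubscribe tokio workers for I/O-bound work.
        Builder::new_multi_thread()
            .enable_all()
            .worker_threads(num_workers)
            .max_blocking_threads(max_blocking)
            .build()
            .expect("Failed to build the tokio runtime")
    }

This also removes the old hard failure on machines with fewer than four cores; the new sizing degrades gracefully instead of exiting.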
preimage.extend(snarkvm::parameters::mainnet::TransferPrivateToPublicVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::TransferPublicToPrivateVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::FeePrivateVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::FeePublicVerifier::METADATA.as_bytes()); + preimage.extend(snarkvm::parameters::mainnet::InclusionVerifier::METADATA.as_bytes()); // Initialize the hasher. let hasher = snarkvm::console::algorithms::BHP256::::setup("aleo.dev.block")?; @@ -578,7 +699,7 @@ fn load_or_compute_genesis( // Initialize a new VM. let vm = VM::from(ConsensusStore::>::open(Some(0))?)?; // Initialize the genesis block. - let block = vm.genesis_quorum(&genesis_private_key, committee, public_balances, rng)?; + let block = vm.genesis_quorum(&genesis_private_key, committee, public_balances, bonded_balances, rng)?; // Write the genesis block to the file. std::fs::write(&file_path, block.to_bytes_le()?)?; // Return the genesis block. @@ -589,9 +710,9 @@ fn load_or_compute_genesis( mod tests { use super::*; use crate::commands::{Command, CLI}; - use snarkvm::prelude::Testnet3; + use snarkvm::prelude::MainnetV0; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; #[test] fn test_parse_trusted_peers() { @@ -744,7 +865,7 @@ mod tests { let mut config = Start::try_parse_from(["snarkos", "--dev", "0"].iter()).unwrap(); config.parse_development(&mut trusted_peers, &mut trusted_validators).unwrap(); let expected_genesis = config.parse_genesis::().unwrap(); - assert_eq!(config.node, SocketAddr::from_str("0.0.0.0:4130").unwrap()); + assert_eq!(config.node, Some(SocketAddr::from_str("0.0.0.0:4130").unwrap())); assert_eq!(config.rest, SocketAddr::from_str("0.0.0.0:3030").unwrap()); assert_eq!(trusted_peers.len(), 0); assert_eq!(trusted_validators.len(), 1); @@ -759,7 +880,7 @@ mod tests { Start::try_parse_from(["snarkos", "--dev", "1", "--validator", "--private-key", ""].iter()).unwrap(); config.parse_development(&mut trusted_peers, &mut trusted_validators).unwrap(); let genesis = config.parse_genesis::().unwrap(); - assert_eq!(config.node, SocketAddr::from_str("0.0.0.0:4131").unwrap()); + assert_eq!(config.node, Some(SocketAddr::from_str("0.0.0.0:4131").unwrap())); assert_eq!(config.rest, SocketAddr::from_str("0.0.0.0:3031").unwrap()); assert_eq!(trusted_peers.len(), 1); assert_eq!(trusted_validators.len(), 1); @@ -774,7 +895,7 @@ mod tests { Start::try_parse_from(["snarkos", "--dev", "2", "--prover", "--private-key", ""].iter()).unwrap(); config.parse_development(&mut trusted_peers, &mut trusted_validators).unwrap(); let genesis = config.parse_genesis::().unwrap(); - assert_eq!(config.node, SocketAddr::from_str("0.0.0.0:4132").unwrap()); + assert_eq!(config.node, Some(SocketAddr::from_str("0.0.0.0:4132").unwrap())); assert_eq!(config.rest, SocketAddr::from_str("0.0.0.0:3032").unwrap()); assert_eq!(trusted_peers.len(), 2); assert_eq!(trusted_validators.len(), 2); @@ -789,7 +910,7 @@ mod tests { Start::try_parse_from(["snarkos", "--dev", "3", "--client", "--private-key", ""].iter()).unwrap(); config.parse_development(&mut trusted_peers, &mut trusted_validators).unwrap(); let genesis = config.parse_genesis::().unwrap(); - assert_eq!(config.node, SocketAddr::from_str("0.0.0.0:4133").unwrap()); + assert_eq!(config.node, Some(SocketAddr::from_str("0.0.0.0:4133").unwrap())); assert_eq!(config.rest, SocketAddr::from_str("0.0.0.0:3033").unwrap()); assert_eq!(trusted_peers.len(), 3); 
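Because computing a quorum genesis block is expensive, `load_or_compute_genesis` caches it on disk under a name derived from a hash of every input that influences the block, now including the network ID and the bonded balances, so switching networks or balances can never resurrect a stale cached genesis. A minimal sketch of the load-or-compute shape, with `std`'s `DefaultHasher` standing in for the BHP256 hash used above:

    use std::{
        collections::hash_map::DefaultHasher,
        fs,
        hash::{Hash, Hasher},
        io,
        path::PathBuf,
    };

    fn load_or_compute(preimage: &[u8], compute: impl FnOnce() -> Vec<u8>) -> io::Result<Vec<u8>> {
        // Derive a content-addressed file name from the preimage.
        let mut hasher = DefaultHasher::new();
        preimage.hash(&mut hasher);
        let file_path: PathBuf = std::env::temp_dir().join(format!("{:016x}.genesis", hasher.finish()));

        // Reuse the cached bytes if every input is unchanged; otherwise compute and persist.
        if file_path.exists() {
            return fs::read(&file_path);
        }
        let bytes = compute();
        fs::write(&file_path, &bytes)?;
        Ok(bytes)
    }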
assert_eq!(trusted_validators.len(), 2);
@@ -817,7 +938,7 @@ mod tests {
             "--validators",
             "IP1,IP2,IP3",
             "--rest",
-            "127.0.0.1:3033",
+            "127.0.0.1:3030",
         ];
         let cli = CLI::parse_from(arg_vec);
@@ -827,8 +948,8 @@
             assert!(start.validator);
             assert_eq!(start.private_key.as_deref(), Some("PRIVATE_KEY"));
             assert_eq!(start.cdn, "CDN");
-            assert_eq!(start.rest, "127.0.0.1:3033".parse().unwrap());
-            assert_eq!(start.network, 3);
+            assert_eq!(start.rest, "127.0.0.1:3030".parse().unwrap());
+            assert_eq!(start.network, 0);
             assert_eq!(start.peers, "IP1,IP2,IP3");
             assert_eq!(start.validators, "IP1,IP2,IP3");
         } else {
diff --git a/cli/src/helpers/dynamic_format.rs b/cli/src/helpers/dynamic_format.rs
new file mode 100644
index 0000000000..6e062857ad
--- /dev/null
+++ b/cli/src/helpers/dynamic_format.rs
@@ -0,0 +1,111 @@
+// Copyright (C) 2019-2023 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::sync::{
+    atomic::{AtomicBool, Ordering},
+    Arc,
+};
+
+use time::{
+    format_description::{self, OwnedFormatItem},
+    OffsetDateTime,
+};
+use tracing::{Event, Subscriber};
+use tracing_subscriber::{
+    fmt::{format::Writer, FmtContext, FormatEvent, FormatFields},
+    registry::LookupSpan,
+};
+
+/// A formatter that can switch between the default formatter and the DIM style.
+pub struct DynamicFormatter {
+    dim_format: DimFormat,
+    default_format: tracing_subscriber::fmt::format::Format,
+    // This is the shutdown flag. When set to true, switch to the DIM format.
+    dim: Arc<AtomicBool>,
+}
+
+impl<S, N> FormatEvent<S, N> for DynamicFormatter
+where
+    S: Subscriber + for<'a> LookupSpan<'a>,
+    N: for<'a> FormatFields<'a> + 'static,
+{
+    fn format_event(&self, ctx: &FmtContext<'_, S, N>, writer: Writer<'_>, event: &Event<'_>) -> std::fmt::Result {
+        if self.dim.load(Ordering::Relaxed) {
+            self.dim_format.format_event(ctx, writer, event)
+        } else {
+            self.default_format.format_event(ctx, writer, event)
+        }
+    }
+}
+
+impl DynamicFormatter {
+    pub fn new(dim: Arc<AtomicBool>) -> Self {
+        let dim_format = DimFormat::new();
+        let default_format = tracing_subscriber::fmt::format::Format::default();
+        Self { dim_format, default_format, dim }
+    }
+}
+
+struct DimFormat {
+    fmt: OwnedFormatItem,
+}
+
+/// A custom format for the DIM style.
+/// This formatter is quite basic and does not support all the features of the default formatter,
+/// but it does support all of the default formatter's standard fields.
+impl DimFormat {
+    fn new() -> Self {
+        let format =
+            format_description::parse_owned::<2>("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond digits:6]Z")
+                .expect("failed to set timestamp format");
+        Self { fmt: format }
+    }
+}
+
+impl<S, N> FormatEvent<S, N> for DimFormat
+where
+    S: Subscriber + for<'a> LookupSpan<'a>,
+    N: for<'a> FormatFields<'a> + 'static,
+{
+    /// Format like the `Full` format, but using the DIM tty style.
+ fn format_event(&self, ctx: &FmtContext<'_, S, N>, mut writer: Writer<'_>, event: &Event<'_>) -> std::fmt::Result { + // set the DIM style if we are in TTY mode + if writer.has_ansi_escapes() { + write!(writer, "\x1b[2m")?; + } + + let date_time = OffsetDateTime::now_utc(); + write!(writer, "{} ", date_time.format(&self.fmt).map_err(|_| std::fmt::Error)?)?; + + let meta = event.metadata(); + let fmt_level = match *meta.level() { + tracing::Level::ERROR => "ERROR", + tracing::Level::WARN => "WARN ", + tracing::Level::INFO => "INFO ", + tracing::Level::DEBUG => "DEBUG", + tracing::Level::TRACE => "TRACE", + }; + write!(writer, "{}", fmt_level)?; + + write!(writer, "{}: ", meta.target())?; + + ctx.format_fields(writer.by_ref(), event)?; + + // reset the style + if writer.has_ansi_escapes() { + write!(writer, "\x1b[0m")?; + } + writeln!(writer) + } +} diff --git a/cli/src/helpers/logger.rs b/cli/src/helpers/logger.rs index 2bb5cf66ad..2fe07285d2 100644 --- a/cli/src/helpers/logger.rs +++ b/cli/src/helpers/logger.rs @@ -12,10 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::helpers::LogWriter; +use crate::helpers::{DynamicFormatter, LogWriter}; use crossterm::tty::IsTty; -use std::{fs::File, io, path::Path}; +use std::{ + fs::File, + io, + path::Path, + sync::{atomic::AtomicBool, Arc}, +}; use tokio::sync::mpsc; use tracing_subscriber::{ layer::{Layer, SubscriberExt}, @@ -34,7 +39,12 @@ use tracing_subscriber::{ /// 5 => info, debug, trace, snarkos_node_router=trace /// 6 => info, debug, trace, snarkos_node_tcp=trace /// ``` -pub fn initialize_logger>(verbosity: u8, nodisplay: bool, logfile: P) -> mpsc::Receiver> { +pub fn initialize_logger>( + verbosity: u8, + nodisplay: bool, + logfile: P, + shutdown: Arc, +) -> mpsc::Receiver> { match verbosity { 0 => std::env::set_var("RUST_LOG", "info"), 1 => std::env::set_var("RUST_LOG", "debug"), @@ -111,6 +121,7 @@ pub fn initialize_logger>(verbosity: u8, nodisplay: bool, logfile .with_ansi(log_sender.is_none() && io::stdout().is_tty()) .with_writer(move || LogWriter::new(&log_sender)) .with_target(verbosity > 2) + .event_format(DynamicFormatter::new(shutdown)) .with_filter(filter), ) .with( diff --git a/cli/src/helpers/mod.rs b/cli/src/helpers/mod.rs index 6cabe254b2..7ea38c52f1 100644 --- a/cli/src/helpers/mod.rs +++ b/cli/src/helpers/mod.rs @@ -18,6 +18,9 @@ pub use bech32m::*; mod log_writer; use log_writer::*; +mod dynamic_format; +use dynamic_format::*; + pub mod logger; pub use logger::*; @@ -41,8 +44,8 @@ pub fn check_open_files_limit(minimum: u64) { // Warn about too low limit. let warning = [ format!("⚠️ The open files limit ({soft_limit}) for this process is lower than recommended."), - format!("⚠️ To ensure correct behavior of the node, please raise it to at least {minimum}."), - "⚠️ See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), + format!(" • To ensure correct behavior of the node, please raise it to at least {minimum}."), + " • See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), ] .join("\n") .yellow() @@ -54,8 +57,8 @@ pub fn check_open_files_limit(minimum: u64) { // Warn about unknown limit. 
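End to end, the dim-on-shutdown wiring is a single shared flag: `initialize_logger` forwards one clone of an `Arc<AtomicBool>` into `DynamicFormatter::new`, and whichever component initiates shutdown keeps another clone. A sketch of the flip, with the logger setup elided:

    use std::sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    };

    fn main() {
        let shutdown: Arc<AtomicBool> = Default::default();
        // ... pass `shutdown.clone()` to `initialize_logger`, which hands it to
        // `DynamicFormatter::new` as shown above ...

        // On shutdown, every subsequent log line renders in the DIM style,
        // without tearing down or rebuilding the tracing subscriber.
        shutdown.store(true, Ordering::Relaxed);
    }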
let warning = [ format!("⚠️ Unable to check the open files limit for this process due to {err}."), - format!("⚠️ To ensure correct behavior of the node, please ensure it is at least {minimum}."), - "⚠️ See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), + format!(" • To ensure correct behavior of the node, please ensure it is at least {minimum}."), + " • See the `ulimit` command and `/etc/security/limits.conf` for more details.".to_owned(), ] .join("\n") .yellow() diff --git a/devnet.sh b/devnet.sh index e91627335a..83ea2cc6b1 100755 --- a/devnet.sh +++ b/devnet.sh @@ -8,6 +8,10 @@ total_validators=${total_validators:-4} read -p "Enter the total number of clients (default: 2): " total_clients total_clients=${total_clients:-2} +# Read the network ID from user or use a default value of 1 +read -p "Enter the network ID (mainnet = 0, testnet = 1) (default: 1): " network_id +network_id=${network_id:-1} + # Ask the user if they want to run 'cargo install --locked --path .' or use a pre-installed binary read -p "Do you want to run 'cargo install --locked --path .' to build the binary? (y/n, default: y): " build_binary build_binary=${build_binary:-y} @@ -28,7 +32,7 @@ if [[ $clear_ledger == "y" ]]; then for ((index = 0; index < $((total_validators + total_clients)); index++)); do # Run 'snarkos clean' for each node in the background - snarkos clean --dev $index & + snarkos clean --network $network_id --dev $index & # Store the process ID of the background task clean_processes+=($!) @@ -64,12 +68,12 @@ for validator_index in "${validator_indices[@]}"; do # Send the command to start the validator to the new window and capture output to the log file if [ "$validator_index" -eq 0 ]; then - tmux send-keys -t "devnet:window$validator_index" "snarkos start --nodisplay --dev $validator_index --dev-num-validators $total_validators --validator --logfile $log_file --metrics" C-m + tmux send-keys -t "devnet:window$validator_index" "snarkos start --nodisplay --network $network_id --dev $validator_index --allow-external-peers --dev-num-validators $total_validators --validator --logfile $log_file --metrics" C-m else # Create a new window with a unique name window_index=$((validator_index + index_offset)) tmux new-window -t "devnet:$window_index" -n "window$validator_index" - tmux send-keys -t "devnet:window$validator_index" "snarkos start --nodisplay --dev $validator_index --dev-num-validators $total_validators --validator --logfile $log_file" C-m + tmux send-keys -t "devnet:window$validator_index" "snarkos start --nodisplay --network $network_id --dev $validator_index --allow-external-peers --dev-num-validators $total_validators --validator --logfile $log_file" C-m fi done @@ -87,7 +91,7 @@ for client_index in "${client_indices[@]}"; do tmux new-window -t "devnet:$window_index" -n "window-$window_index" # Send the command to start the validator to the new window and capture output to the log file - tmux send-keys -t "devnet:window-$window_index" "snarkos start --nodisplay --dev $window_index --client --logfile $log_file" C-m + tmux send-keys -t "devnet:window-$window_index" "snarkos start --nodisplay --network $network_id --dev $window_index --dev-num-validators $total_validators --client --logfile $log_file" C-m done # Attach to the tmux session to view and interact with the windows diff --git a/node/Cargo.toml b/node/Cargo.toml index 88e23403fd..82cf32b8e6 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -129,7 +129,7 @@ version = "0.2" version = "1" 
[dev-dependencies.pea2pea] -version = "0.46" +version = "0.49" [dev-dependencies.snarkos-node-router] path = "./router" diff --git a/node/bft/Cargo.toml b/node/bft/Cargo.toml index 300e6adb21..8b2c5ed86c 100644 --- a/node/bft/Cargo.toml +++ b/node/bft/Cargo.toml @@ -18,7 +18,7 @@ edition = "2021" [features] default = [ ] -metrics = [ "dep:metrics", "snarkos-node-bft-events/metrics" ] +metrics = [ "dep:metrics", "snarkos-node-bft-events/metrics", "snarkos-node-bft-ledger-service/metrics" ] [dependencies.aleo-std] workspace = true @@ -97,9 +97,6 @@ version = "=2.2.7" [dependencies.snarkvm] workspace = true -[dependencies.snow] -version = "0.9" - [dependencies.time] version = "0.3" @@ -140,6 +137,9 @@ version = "5" [dev-dependencies.paste] version = "1" +[dev-dependencies.pea2pea] + version = "0.49" + [dev-dependencies.proptest] version = "1.4.0" diff --git a/node/bft/events/Cargo.toml b/node/bft/events/Cargo.toml index 5eb134b4c7..1dc1de94cb 100644 --- a/node/bft/events/Cargo.toml +++ b/node/bft/events/Cargo.toml @@ -18,7 +18,7 @@ edition = "2021" [features] default = [ ] -metrics = [ "dep:metrics" ] +metrics = ["dep:metrics", "snarkvm/metrics"] [dependencies.anyhow] version = "1.0" @@ -49,9 +49,6 @@ version = "=2.2.7" [dependencies.snarkvm] workspace = true -[dependencies.snow] -version = "0.9" - [dependencies.tokio-util] version = "0.7" features = [ "codec" ] diff --git a/node/bft/events/src/batch_certified.rs b/node/bft/events/src/batch_certified.rs index 615a200c28..03898af68f 100644 --- a/node/bft/events/src/batch_certified.rs +++ b/node/bft/events/src/batch_certified.rs @@ -65,7 +65,7 @@ pub mod prop_tests { use proptest::prelude::{BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_batch_certified() -> BoxedStrategy> { any_batch_certificate().prop_map(BatchCertified::from).boxed() diff --git a/node/bft/events/src/batch_propose.rs b/node/bft/events/src/batch_propose.rs index 54b48d6905..a1c180247e 100644 --- a/node/bft/events/src/batch_propose.rs +++ b/node/bft/events/src/batch_propose.rs @@ -72,7 +72,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_batch_propose() -> BoxedStrategy> { any::() diff --git a/node/bft/events/src/batch_signature.rs b/node/bft/events/src/batch_signature.rs index 0785c96a7f..c6ab199424 100644 --- a/node/bft/events/src/batch_signature.rs +++ b/node/bft/events/src/batch_signature.rs @@ -65,7 +65,7 @@ pub mod prop_tests { use proptest::prelude::{BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_batch_signature() -> BoxedStrategy> { (any_field(), any_signature()) diff --git a/node/bft/events/src/block_response.rs b/node/bft/events/src/block_response.rs index 15e1db2254..2bf1f20057 100644 --- a/node/bft/events/src/block_response.rs +++ b/node/bft/events/src/block_response.rs @@ -64,7 +64,7 @@ pub struct DataBlocks(pub Vec>); impl DataBlocks { /// The maximum number of blocks that can be sent in a single message. - pub const MAXIMUM_NUMBER_OF_BLOCKS: u8 = 1; + pub const MAXIMUM_NUMBER_OF_BLOCKS: u8 = 5; /// Ensures that the blocks are well-formed in a block response. 
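Raising `MAXIMUM_NUMBER_OF_BLOCKS` from 1 to 5 lets a peer serve a requested block range in five-block responses instead of one response per block. An illustrative sketch of how a range decomposes under the cap (the helper is hypothetical, not part of the PR):

    /// The maximum number of blocks that can be sent in a single message.
    const MAXIMUM_NUMBER_OF_BLOCKS: u8 = 5;

    /// Splits the half-open range `start..end` into per-response chunks.
    fn response_chunks(start: u32, end: u32) -> Vec<(u32, u32)> {
        let step = MAXIMUM_NUMBER_OF_BLOCKS as u32;
        (start..end)
            .step_by(step as usize)
            .map(|chunk_start| (chunk_start, (chunk_start + step).min(end)))
            .collect()
    }

    // response_chunks(100, 112) == [(100, 105), (105, 110), (110, 112)]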
pub fn ensure_response_is_well_formed( @@ -148,7 +148,7 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_block() -> BoxedStrategy> { any::().prop_map(|seed| sample_genesis_block(&mut TestRng::fixed(seed))).boxed() diff --git a/node/bft/events/src/certificate_request.rs b/node/bft/events/src/certificate_request.rs index 9190186f65..525112fd7f 100644 --- a/node/bft/events/src/certificate_request.rs +++ b/node/bft/events/src/certificate_request.rs @@ -68,7 +68,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_field() -> BoxedStrategy> { any::().prop_map(|_| Field::rand(&mut TestRng::default())).boxed() diff --git a/node/bft/events/src/certificate_response.rs b/node/bft/events/src/certificate_response.rs index f0942dfc32..e24c3db3f4 100644 --- a/node/bft/events/src/certificate_response.rs +++ b/node/bft/events/src/certificate_response.rs @@ -80,13 +80,13 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_batch_header(committee: &CommitteeContext) -> BoxedStrategy> { (Just(committee.clone()), any::(), vec(any_transmission(), 0..16)) .prop_map(|(committee, selector, transmissions)| { let mut rng = TestRng::default(); - let CommitteeContext(_, ValidatorSet(validators)) = committee; + let CommitteeContext(committee, ValidatorSet(validators)) = committee; let signer = selector.select(validators); let transmission_ids = transmissions.into_iter().map(|(id, _)| id).collect(); @@ -94,9 +94,9 @@ pub mod prop_tests { &signer.private_key, 0, now(), + committee.id(), transmission_ids, Default::default(), - Default::default(), &mut rng, ) .unwrap() diff --git a/node/bft/events/src/challenge_request.rs b/node/bft/events/src/challenge_request.rs index 2ce3236bec..30e7321e1b 100644 --- a/node/bft/events/src/challenge_request.rs +++ b/node/bft/events/src/challenge_request.rs @@ -70,7 +70,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_valid_address() -> BoxedStrategy> { any::().prop_map(|seed| Address::rand(&mut TestRng::fixed(seed))).boxed() diff --git a/node/bft/events/src/challenge_response.rs b/node/bft/events/src/challenge_response.rs index 0fc678a86d..c75953c4ec 100644 --- a/node/bft/events/src/challenge_response.rs +++ b/node/bft/events/src/challenge_response.rs @@ -17,6 +17,7 @@ use super::*; #[derive(Clone, Debug, PartialEq, Eq)] pub struct ChallengeResponse { pub signature: Data>, + pub nonce: u64, } impl EventTrait for ChallengeResponse { @@ -30,6 +31,7 @@ impl EventTrait for ChallengeResponse { impl ToBytes for ChallengeResponse { fn write_le(&self, mut writer: W) -> IoResult<()> { self.signature.write_le(&mut writer)?; + self.nonce.write_le(&mut writer)?; Ok(()) } } @@ -37,8 +39,9 @@ impl ToBytes for ChallengeResponse { impl FromBytes for ChallengeResponse { fn read_le(mut reader: R) -> IoResult { let signature = Data::read_le(&mut reader)?; + let nonce = u64::read_le(&mut reader)?; - Ok(Self { signature }) + Ok(Self { signature, nonce }) } } @@ -53,10 +56,10 @@ pub mod prop_tests { }; use bytes::{Buf, BufMut, 
BytesMut}; - use proptest::prelude::{BoxedStrategy, Strategy}; + use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_signature() -> BoxedStrategy> { (0..64) @@ -70,7 +73,9 @@ pub mod prop_tests { } pub fn any_challenge_response() -> BoxedStrategy> { - any_signature().prop_map(|sig| ChallengeResponse { signature: Data::Object(sig) }).boxed() + (any_signature(), any::()) + .prop_map(|(sig, nonce)| ChallengeResponse { signature: Data::Object(sig), nonce }) + .boxed() } #[proptest] diff --git a/node/bft/events/src/helpers/codec.rs b/node/bft/events/src/helpers/codec.rs index 1de8bcf694..89466a99a3 100644 --- a/node/bft/events/src/helpers/codec.rs +++ b/node/bft/events/src/helpers/codec.rs @@ -15,24 +15,15 @@ use crate::Event; use snarkvm::prelude::{FromBytes, Network, ToBytes}; -use bytes::{Buf, BufMut, Bytes, BytesMut}; +use bytes::{Buf, BufMut, BytesMut}; use core::marker::PhantomData; -use rayon::{ - iter::{IndexedParallelIterator, ParallelIterator}, - prelude::ParallelSlice, -}; -use snow::{HandshakeState, StatelessTransportState}; -use std::{io, sync::Arc}; use tokio_util::codec::{Decoder, Encoder, LengthDelimitedCodec}; use tracing::*; /// The maximum size of an event that can be transmitted during the handshake. const MAX_HANDSHAKE_SIZE: usize = 1024 * 1024; // 1 MiB /// The maximum size of an event that can be transmitted in the network. -const MAX_EVENT_SIZE: usize = 128 * 1024 * 1024; // 128 MiB - -/// The type of noise handshake to use for network encryption. -pub const NOISE_HANDSHAKE_TYPE: &str = "Noise_XX_25519_ChaChaPoly_BLAKE2s"; +const MAX_EVENT_SIZE: usize = 256 * 1024 * 1024; // 256 MiB /// The codec used to decode and encode network `Event`s. pub struct EventCodec { @@ -96,297 +87,25 @@ impl Decoder for EventCodec { } } -/* NOISE CODEC */ - -// The maximum message size for noise messages. If the data to be encrypted exceeds it, it is chunked. -const MAX_MESSAGE_LEN: usize = 65535; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum EventOrBytes { - Bytes(Bytes), - Event(Event), -} - -impl ToBytes for EventOrBytes { - fn write_le(&self, mut writer: W) -> io::Result<()> { - match self { - Self::Bytes(bytes) => { - 0u8.write_le(&mut writer)?; - writer.write_all(bytes) - } - Self::Event(event) => { - 1u8.write_le(&mut writer)?; - event.write_le(writer) - } - } - } -} - -#[derive(Clone)] -pub struct PostHandshakeState { - state: Arc, - tx_nonce: u64, - rx_nonce: u64, -} - -pub enum NoiseState { - Handshake(Box), - PostHandshake(PostHandshakeState), - Failed, -} - -impl Clone for NoiseState { - fn clone(&self) -> Self { - match self { - Self::Handshake(..) 
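The new `nonce` is appended directly after the signature in the wire format, and the `write_le`/`read_le` pair above is kept symmetric so both ends agree on the layout. The same append-a-field pattern in a standalone sketch (simplified types, not the actual event):

    use std::io::{Read, Result, Write};

    struct Response {
        signature: Vec<u8>, // a fixed 64-byte signature in this sketch
        nonce: u64,
    }

    impl Response {
        fn write_le(&self, writer: &mut impl Write) -> Result<()> {
            writer.write_all(&self.signature)?;
            writer.write_all(&self.nonce.to_le_bytes())
        }

        fn read_le(reader: &mut impl Read) -> Result<Self> {
            let mut signature = vec![0u8; 64];
            reader.read_exact(&mut signature)?;
            let mut nonce = [0u8; 8];
            reader.read_exact(&mut nonce)?;
            Ok(Self { signature, nonce: u64::from_le_bytes(nonce) })
        }
    }

An old peer would read the nonce bytes as the start of the next frame, so the change is wire-breaking; that is one reason `Event::VERSION` is bumped later in this diff.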
=> unreachable!(), - Self::PostHandshake(ph_state) => Self::PostHandshake(ph_state.clone()), - Self::Failed => unreachable!("Forbidden: cloning noise handshake"), - } - } -} - -impl NoiseState { - pub fn into_post_handshake_state(self) -> Self { - if let Self::Handshake(noise_state) = self { - match noise_state.into_stateless_transport_mode() { - Ok(new_state) => { - return Self::PostHandshake(PostHandshakeState { - state: Arc::new(new_state), - tx_nonce: 0, - rx_nonce: 0, - }); - } - Err(error) => { - warn!("Handshake not finished - {error}"); - } - } - } else { - warn!("Handshake in wrong state"); - } - - NoiseState::Failed - } -} - -pub struct NoiseCodec { - codec: LengthDelimitedCodec, - event_codec: EventCodec, - pub noise_state: NoiseState, -} - -impl NoiseCodec { - pub fn new(noise_state: NoiseState) -> Self { - Self { codec: LengthDelimitedCodec::new(), event_codec: EventCodec::default(), noise_state } - } -} - -impl Encoder> for NoiseCodec { - type Error = std::io::Error; - - fn encode(&mut self, message_or_bytes: EventOrBytes, dst: &mut BytesMut) -> Result<(), Self::Error> { - #[cfg(feature = "metrics")] - let start = std::time::Instant::now(); - - let ciphertext = match self.noise_state { - NoiseState::Handshake(ref mut noise) => { - match message_or_bytes { - // Don't allow message sending before the noise handshake has completed. - EventOrBytes::Event(_) => unimplemented!(), - EventOrBytes::Bytes(bytes) => { - let mut buffer = [0u8; MAX_MESSAGE_LEN]; - let len = noise - .write_message(&bytes, &mut buffer[..]) - .map_err(|e| Self::Error::new(io::ErrorKind::InvalidInput, e))?; - - #[cfg(feature = "metrics")] - metrics::histogram(metrics::tcp::NOISE_CODEC_ENCRYPTION_SIZE, len as f64); - - buffer[..len].into() - } - } - } - - NoiseState::PostHandshake(ref mut noise) => { - // Encode the message using the event codec. - let mut bytes = BytesMut::new(); - match message_or_bytes { - // Don't allow sending raw bytes after the noise handshake has completed. - EventOrBytes::Bytes(_) => panic!("Unsupported post-handshake"), - EventOrBytes::Event(event) => self.event_codec.encode(event, &mut bytes)?, - } - - #[cfg(feature = "metrics")] - metrics::histogram(metrics::tcp::NOISE_CODEC_ENCRYPTION_SIZE, bytes.len() as f64); - - // Chunk the payload if necessary and encrypt with Noise. - // - // A Noise transport message is simply an AEAD ciphertext that is less than or - // equal to 65535 bytes in length, and that consists of an encrypted payload plus - // 16 bytes of authentication data. - // - // See: https://noiseprotocol.org/noise.html#the-handshakestate-object - const TAG_LEN: usize = 16; - let encrypted_chunks = bytes - .par_chunks(MAX_MESSAGE_LEN - TAG_LEN) - .enumerate() - .map(|(nonce_offset, plaintext_chunk)| { - let mut buffer = vec![0u8; MAX_MESSAGE_LEN]; - let len = noise - .state - .write_message(noise.tx_nonce + nonce_offset as u64, plaintext_chunk, &mut buffer) - .map_err(|e| Self::Error::new(io::ErrorKind::InvalidInput, e))?; - - buffer.truncate(len); - - Ok(buffer) - }) - .collect::>>>()?; - - let mut buffer = BytesMut::with_capacity(encrypted_chunks.len()); - for chunk in encrypted_chunks { - buffer.extend_from_slice(&chunk); - noise.tx_nonce += 1; - } - - buffer - } - - NoiseState::Failed => unreachable!("Noise handshake failed to encode"), - }; - - // Encode the resulting ciphertext using the length-delimited codec. 
- #[allow(clippy::let_and_return)] - let result = self.codec.encode(ciphertext.freeze(), dst); - - #[cfg(feature = "metrics")] - metrics::histogram(metrics::tcp::NOISE_CODEC_ENCRYPTION_TIME, start.elapsed().as_micros() as f64); - result - } -} - -impl Decoder for NoiseCodec { - type Error = io::Error; - type Item = EventOrBytes; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - #[cfg(feature = "metrics")] - metrics::histogram(metrics::tcp::NOISE_CODEC_DECRYPTION_SIZE, src.len() as f64); - #[cfg(feature = "metrics")] - let start = std::time::Instant::now(); - - // Decode the ciphertext with the length-delimited codec. - let Some(bytes) = self.codec.decode(src)? else { - return Ok(None); - }; - - let msg = match self.noise_state { - NoiseState::Handshake(ref mut noise) => { - // Decrypt the ciphertext in handshake mode. - let mut buffer = [0u8; MAX_MESSAGE_LEN]; - let len = noise.read_message(&bytes, &mut buffer).map_err(|_| io::ErrorKind::InvalidData)?; - - Some(EventOrBytes::Bytes(Bytes::copy_from_slice(&buffer[..len]))) - } - - NoiseState::PostHandshake(ref mut noise) => { - // Noise decryption. - let decrypted_chunks = bytes - .par_chunks(MAX_MESSAGE_LEN) - .enumerate() - .map(|(nonce_offset, encrypted_chunk)| { - let mut buffer = vec![0u8; MAX_MESSAGE_LEN]; - - // Decrypt the ciphertext in post-handshake mode. - let len = noise - .state - .read_message(noise.rx_nonce + nonce_offset as u64, encrypted_chunk, &mut buffer) - .map_err(|_| io::ErrorKind::InvalidData)?; - - buffer.truncate(len); - Ok(buffer) - }) - .collect::>>>()?; - - // Collect chunks into plaintext to be passed to the message codecs. - let mut plaintext = BytesMut::new(); - for chunk in decrypted_chunks { - plaintext.extend_from_slice(&chunk); - noise.rx_nonce += 1; - } - - // Decode with message codecs. 
- self.event_codec.decode(&mut plaintext)?.map(|msg| EventOrBytes::Event(msg)) - } - - NoiseState::Failed => unreachable!("Noise handshake failed to decode"), - }; - - #[cfg(feature = "metrics")] - metrics::histogram(metrics::tcp::NOISE_CODEC_DECRYPTION_TIME, start.elapsed().as_micros() as f64); - Ok(msg) - } -} - #[cfg(test)] mod tests { use super::*; use crate::prop_tests::any_event; - - use snow::{params::NoiseParams, Builder}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; - - fn handshake_xx() -> (NoiseCodec, NoiseCodec) { - let params: NoiseParams = NOISE_HANDSHAKE_TYPE.parse().unwrap(); - let initiator_builder = Builder::new(params.clone()); - let initiator_kp = initiator_builder.generate_keypair().unwrap(); - let initiator = initiator_builder.local_private_key(&initiator_kp.private).build_initiator().unwrap(); - - let responder_builder = Builder::new(params); - let responder_kp = responder_builder.generate_keypair().unwrap(); - let responder = responder_builder.local_private_key(&responder_kp.private).build_responder().unwrap(); - - let mut initiator_codec = NoiseCodec::new(NoiseState::Handshake(Box::new(initiator))); - let mut responder_codec = NoiseCodec::new(NoiseState::Handshake(Box::new(responder))); - - let mut ciphertext = BytesMut::new(); - - // -> e - assert!(initiator_codec.encode(EventOrBytes::Bytes(Bytes::new()), &mut ciphertext).is_ok()); - assert!( - matches!(responder_codec.decode(&mut ciphertext).unwrap().unwrap(), EventOrBytes::Bytes(bytes) if bytes.is_empty()) - ); - - // <- e, ee, s, es - assert!(responder_codec.encode(EventOrBytes::Bytes(Bytes::new()), &mut ciphertext).is_ok()); - assert!( - matches!(initiator_codec.decode(&mut ciphertext).unwrap().unwrap(), EventOrBytes::Bytes(bytes) if bytes.is_empty()) - ); - - // -> s, se - assert!(initiator_codec.encode(EventOrBytes::Bytes(Bytes::new()), &mut ciphertext).is_ok()); - assert!( - matches!(responder_codec.decode(&mut ciphertext).unwrap().unwrap(), EventOrBytes::Bytes(bytes) if bytes.is_empty()) - ); - - initiator_codec.noise_state = initiator_codec.noise_state.into_post_handshake_state(); - responder_codec.noise_state = responder_codec.noise_state.into_post_handshake_state(); - - (initiator_codec, responder_codec) - } + type CurrentNetwork = snarkvm::prelude::MainnetV0; - fn assert_roundtrip(msg: EventOrBytes) { - let (mut initiator_codec, mut responder_codec) = handshake_xx(); - let mut ciphertext = BytesMut::new(); + fn assert_roundtrip(msg: Event) { + let mut codec: EventCodec = Default::default(); + let mut encoded_event = BytesMut::new(); - assert!(initiator_codec.encode(msg.clone(), &mut ciphertext).is_ok()); - let decoded = responder_codec.decode(&mut ciphertext).unwrap().unwrap(); + assert!(codec.encode(msg.clone(), &mut encoded_event).is_ok()); + let decoded = codec.decode(&mut encoded_event).unwrap().unwrap(); assert_eq!(decoded.to_bytes_le().unwrap(), msg.to_bytes_le().unwrap()); } #[proptest] fn event_roundtrip(#[strategy(any_event())] event: Event) { - assert_roundtrip(EventOrBytes::Event(event)) + assert_roundtrip(event) } } diff --git a/node/bft/events/src/lib.rs b/node/bft/events/src/lib.rs index d49073b1c5..c6e5c04a8c 100644 --- a/node/bft/events/src/lib.rs +++ b/node/bft/events/src/lib.rs @@ -70,7 +70,6 @@ use snarkvm::{ console::prelude::{error, FromBytes, Network, Read, ToBytes, Write}, ledger::{ block::Block, - committee::Committee, narwhal::{BatchCertificate, BatchHeader, Data, Transmission, TransmissionID}, }, prelude::{Address, Field, Signature}, @@ 
-118,7 +117,7 @@ impl From for Event { impl Event { /// The version of the event protocol; it can be incremented in order to force users to update. - pub const VERSION: u32 = 5; + pub const VERSION: u32 = 6; /// Returns the event name. #[inline] @@ -232,7 +231,7 @@ mod tests { use crate::Event; use bytes::{Buf, BufMut, BytesMut}; use snarkvm::console::prelude::{FromBytes, ToBytes}; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; #[test] fn deserializing_invalid_data_panics() { @@ -265,7 +264,7 @@ pub mod prop_tests { }; use snarkvm::{ console::{network::Network, types::Field}, - ledger::{coinbase::PuzzleCommitment, narwhal::TransmissionID}, + ledger::{narwhal::TransmissionID, puzzle::SolutionID}, prelude::{FromBytes, Rng, ToBytes, Uniform}, }; @@ -276,15 +275,15 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; /// Returns the current UTC epoch timestamp. pub fn now() -> i64 { time::OffsetDateTime::now_utc().unix_timestamp() } - pub fn any_puzzle_commitment() -> BoxedStrategy> { - Just(0).prop_perturb(|_, mut rng| PuzzleCommitment::from_g1_affine(rng.gen())).boxed() + pub fn any_solution_id() -> BoxedStrategy> { + Just(0).prop_perturb(|_, mut rng| rng.gen::().into()).boxed() } pub fn any_transaction_id() -> BoxedStrategy<::TransactionID> { @@ -296,7 +295,7 @@ pub mod prop_tests { pub fn any_transmission_id() -> BoxedStrategy> { prop_oneof![ any_transaction_id().prop_map(TransmissionID::Transaction), - any_puzzle_commitment().prop_map(TransmissionID::Solution), + any_solution_id().prop_map(TransmissionID::Solution), ] .boxed() } diff --git a/node/bft/events/src/primary_ping.rs b/node/bft/events/src/primary_ping.rs index 8f2fe67da7..23dd85a926 100644 --- a/node/bft/events/src/primary_ping.rs +++ b/node/bft/events/src/primary_ping.rs @@ -19,7 +19,6 @@ pub struct PrimaryPing { pub version: u32, pub block_locators: BlockLocators, pub primary_certificate: Data>, - pub batch_certificates: IndexMap, Data>>, } impl PrimaryPing { @@ -28,28 +27,15 @@ impl PrimaryPing { version: u32, block_locators: BlockLocators, primary_certificate: Data>, - batch_certificates: IndexMap, Data>>, ) -> Self { - Self { version, block_locators, primary_certificate, batch_certificates } + Self { version, block_locators, primary_certificate } } } -impl From<(u32, BlockLocators, BatchCertificate, IndexSet>)> for PrimaryPing { +impl From<(u32, BlockLocators, BatchCertificate)> for PrimaryPing { /// Initializes a new ping event. - fn from( - (version, block_locators, primary_certificate, batch_certificates): ( - u32, - BlockLocators, - BatchCertificate, - IndexSet>, - ), - ) -> Self { - Self::new( - version, - block_locators, - Data::Object(primary_certificate), - batch_certificates.into_iter().map(|c| (c.id(), Data::Object(c))).collect(), - ) + fn from((version, block_locators, primary_certificate): (u32, BlockLocators, BatchCertificate)) -> Self { + Self::new(version, block_locators, Data::Object(primary_certificate)) } } @@ -70,17 +56,6 @@ impl ToBytes for PrimaryPing { // Write the primary certificate. self.primary_certificate.write_le(&mut writer)?; - // Determine the number of batch certificates. - let num_certificates = - u16::try_from(self.batch_certificates.len()).map_err(error)?.min(Committee::::MAX_COMMITTEE_SIZE); - - // Write the number of batch certificates. - num_certificates.write_le(&mut writer)?; - // Write the batch certificates. 
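Bumping `VERSION` from 5 to 6 is what actually forces the upgrade: the layout changes in this diff (the `ChallengeResponse` nonce, the slimmed `PrimaryPing` below) would silently mis-parse against old peers otherwise. A hypothetical sketch of the kind of gate this enables; the actual handshake check in the gateway may differ:

    /// The version of the event protocol.
    const VERSION: u32 = 6;

    /// Rejects peers that advertise an event protocol older than ours.
    fn is_compatible(peer_version: u32) -> bool {
        peer_version >= VERSION
    }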
- for (certificate_id, certificate) in self.batch_certificates.iter().take(usize::from(num_certificates)) { - certificate_id.write_le(&mut writer)?; - certificate.write_le(&mut writer)?; - } Ok(()) } } @@ -94,27 +69,8 @@ impl FromBytes for PrimaryPing { // Read the primary certificate. let primary_certificate = Data::read_le(&mut reader)?; - // Read the number of batch certificates. - let num_certificates = u16::read_le(&mut reader)?; - // Ensure the number of batch certificates is not greater than the maximum committee size. - // Note: We allow there to be 0 batch certificates. This is necessary to ensure primary pings are sent. - if num_certificates > Committee::::MAX_COMMITTEE_SIZE { - return Err(error("The number of batch certificates is greater than the maximum committee size")); - } - - // Read the batch certificates. - let mut batch_certificates = IndexMap::with_capacity(usize::from(num_certificates)); - for _ in 0..num_certificates { - // Read the certificate ID. - let certificate_id = Field::read_le(&mut reader)?; - // Read the certificate. - let certificate = Data::read_le(&mut reader)?; - // Insert the certificate. - batch_certificates.insert(certificate_id, certificate); - } - // Return the ping event. - Ok(Self::new(version, block_locators, primary_certificate, batch_certificates)) + Ok(Self::new(version, block_locators, primary_certificate)) } } @@ -125,11 +81,10 @@ pub mod prop_tests { use snarkvm::utilities::{FromBytes, ToBytes}; use bytes::{Buf, BufMut, BytesMut}; - use indexmap::indexset; use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_block_locators() -> BoxedStrategy> { any::().prop_map(sample_block_locators).boxed() @@ -138,7 +93,7 @@ pub mod prop_tests { pub fn any_primary_ping() -> BoxedStrategy> { (any::(), any_block_locators(), any_batch_certificate()) .prop_map(|(version, block_locators, batch_certificate)| { - PrimaryPing::from((version, block_locators, batch_certificate.clone(), indexset![batch_certificate])) + PrimaryPing::from((version, block_locators, batch_certificate.clone())) }) .boxed() } @@ -154,13 +109,5 @@ pub mod prop_tests { primary_ping.primary_certificate.deserialize_blocking().unwrap(), decoded.primary_certificate.deserialize_blocking().unwrap(), ); - assert!( - primary_ping - .batch_certificates - .into_iter() - .map(|(a, bc)| (a, bc.deserialize_blocking().unwrap())) - .zip(decoded.batch_certificates.into_iter().map(|(a, bc)| (a, bc.deserialize_blocking().unwrap()))) - .all(|(a, b)| a == b) - ) } } diff --git a/node/bft/events/src/transmission_request.rs b/node/bft/events/src/transmission_request.rs index 7b1ec442cd..2fded63bb6 100644 --- a/node/bft/events/src/transmission_request.rs +++ b/node/bft/events/src/transmission_request.rs @@ -59,7 +59,7 @@ impl FromBytes for TransmissionRequest { #[cfg(test)] pub mod prop_tests { use crate::{ - prop_tests::{any_puzzle_commitment, any_transaction_id}, + prop_tests::{any_solution_id, any_transaction_id}, TransmissionRequest, }; use snarkvm::{ @@ -74,11 +74,11 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; fn any_transmission_id() -> BoxedStrategy> { prop_oneof![ - any_puzzle_commitment().prop_map(TransmissionID::Solution), + any_solution_id().prop_map(TransmissionID::Solution), any_transaction_id().prop_map(TransmissionID::Transaction), ] 
.boxed() diff --git a/node/bft/events/src/transmission_response.rs b/node/bft/events/src/transmission_response.rs index c08f729a3f..536f0067af 100644 --- a/node/bft/events/src/transmission_response.rs +++ b/node/bft/events/src/transmission_response.rs @@ -62,7 +62,7 @@ impl FromBytes for TransmissionResponse { #[cfg(test)] pub mod prop_tests { use crate::{ - prop_tests::{any_puzzle_commitment, any_transaction_id}, + prop_tests::{any_solution_id, any_transaction_id}, TransmissionResponse, }; use snarkvm::{ @@ -78,11 +78,11 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_transmission() -> BoxedStrategy<(TransmissionID, Transmission)> { prop_oneof![ - (any_puzzle_commitment(), collection::vec(any::(), 256..=256)).prop_map(|(pc, bytes)| ( + (any_solution_id(), collection::vec(any::(), 256..=256)).prop_map(|(pc, bytes)| ( TransmissionID::Solution(pc), Transmission::Solution(Data::Buffer(Bytes::from(bytes))) )), diff --git a/node/bft/events/src/validators_response.rs b/node/bft/events/src/validators_response.rs index 2299763060..f7dfeaad14 100644 --- a/node/bft/events/src/validators_response.rs +++ b/node/bft/events/src/validators_response.rs @@ -72,7 +72,7 @@ pub mod prop_tests { use std::net::{IpAddr, SocketAddr}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_valid_socket_addr() -> BoxedStrategy { any::<(IpAddr, u16)>().prop_map(|(ip_addr, port)| SocketAddr::new(ip_addr, port)).boxed() diff --git a/node/bft/events/src/worker_ping.rs b/node/bft/events/src/worker_ping.rs index 98a7f3ad95..1bb578d0f0 100644 --- a/node/bft/events/src/worker_ping.rs +++ b/node/bft/events/src/worker_ping.rs @@ -74,7 +74,7 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_worker_ping() -> BoxedStrategy> { hash_set(any_transmission_id(), 1..16).prop_map(|ids| WorkerPing::new(ids.into_iter().collect())).boxed() diff --git a/node/bft/examples/simple_node.rs b/node/bft/examples/simple_node.rs index 116d3693f1..1af54fcd55 100644 --- a/node/bft/examples/simple_node.rs +++ b/node/bft/examples/simple_node.rs @@ -20,23 +20,23 @@ use snarkos_node_bft::{ helpers::{init_consensus_channels, init_primary_channels, ConsensusReceiver, PrimarySender, Storage}, Primary, BFT, - MAX_GC_ROUNDS, MEMORY_POOL_PORT, }; -use snarkos_node_bft_ledger_service::MockLedgerService; +use snarkos_node_bft_ledger_service::TranslucentLedgerService; use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ + console::{account::PrivateKey, algorithms::BHP256, types::Address}, ledger::{ - committee::{Committee, MIN_VALIDATOR_STAKE}, - narwhal::Data, - }, - prelude::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, - Field, - Network, - Uniform, + committee::{Committee, MIN_VALIDATOR_STAKE}, + narwhal::{BatchHeader, Data}, + puzzle::{Solution, SolutionID}, + store::{helpers::memory::ConsensusMemory, ConsensusStore}, + Block, + Ledger, }, + prelude::{Field, Hash, Network, Uniform, VM}, + utilities::{to_bytes_le, FromBytes, TestRng, ToBits, ToBytes}, }; use ::bytes::Bytes; @@ -51,15 +51,21 @@ use axum::{ use axum_extra::response::ErasedJson; use clap::{Parser, ValueEnum}; use indexmap::IndexMap; -use rand::{Rng, SeedableRng}; -use std::{collections::HashMap, net::SocketAddr, 
path::PathBuf, str::FromStr, sync::Arc}; +use rand::{CryptoRng, Rng, SeedableRng}; +use std::{ + collections::HashMap, + net::SocketAddr, + path::PathBuf, + str::FromStr, + sync::{atomic::AtomicBool, Arc, Mutex, OnceLock}, +}; use tokio::{net::TcpListener, sync::oneshot}; use tracing_subscriber::{ layer::{Layer, SubscriberExt}, util::SubscriberInitExt, }; -type CurrentNetwork = snarkvm::prelude::Testnet3; +type CurrentNetwork = snarkvm::prelude::MainnetV0; /**************************************************************************************************/ @@ -113,10 +119,14 @@ pub async fn start_bft( let (sender, receiver) = init_primary_channels(); // Initialize the components. let (committee, account) = initialize_components(node_id, num_nodes)?; - // Initialize the mock ledger service. - let ledger = Arc::new(MockLedgerService::new(committee)); + // Initialize the translucent ledger service. + let ledger = create_ledger(&account, num_nodes, committee, node_id); // Initialize the storage. - let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), MAX_GC_ROUNDS); + let storage = Storage::new( + ledger.clone(), + Arc::new(BFTMemoryService::new()), + BatchHeader::::MAX_GC_ROUNDS as u64, + ); // Initialize the gateway IP and dev mode. let (ip, dev) = match peers.get(&node_id) { Some(ip) => (Some(*ip), None), @@ -150,10 +160,14 @@ pub async fn start_primary( let (sender, receiver) = init_primary_channels(); // Initialize the components. let (committee, account) = initialize_components(node_id, num_nodes)?; - // Initialize the mock ledger service. - let ledger = Arc::new(MockLedgerService::new(committee)); + // Initialize the translucent ledger service. + let ledger = create_ledger(&account, num_nodes, committee, node_id); // Initialize the storage. - let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), MAX_GC_ROUNDS); + let storage = Storage::new( + ledger.clone(), + Arc::new(BFTMemoryService::new()), + BatchHeader::::MAX_GC_ROUNDS as u64, + ); // Initialize the gateway IP and dev mode. let (ip, dev) = match peers.get(&node_id) { Some(ip) => (Some(*ip), None), @@ -171,6 +185,81 @@ pub async fn start_primary( Ok((primary, sender)) } +/// Initialize the translucent ledger service. +fn create_ledger( + account: &Account, + num_nodes: u16, + committee: Committee, + node_id: u16, +) -> Arc>> { + let gen_key = account.private_key(); + let public_balance_per_validator = + (CurrentNetwork::STARTING_SUPPLY - (num_nodes as u64) * MIN_VALIDATOR_STAKE) / (num_nodes as u64); + let mut balances = IndexMap::, u64>::new(); + for address in committee.members().keys() { + balances.insert(*address, public_balance_per_validator); + } + let mut rng = TestRng::default(); + let gen_ledger = genesis_ledger(*gen_key, committee.clone(), balances.clone(), node_id, &mut rng); + Arc::new(TranslucentLedgerService::new(gen_ledger, Arc::new(AtomicBool::new(false)))) +} + +pub type CurrentLedger = Ledger>; + +fn genesis_cache() -> &'static Mutex, Block>> { + static CACHE: OnceLock, Block>>> = OnceLock::new(); + CACHE.get_or_init(|| Mutex::new(HashMap::new())) +} + +fn genesis_block( + genesis_private_key: PrivateKey, + committee: Committee, + public_balances: IndexMap, u64>, + rng: &mut (impl Rng + CryptoRng), +) -> Block { + // Initialize the store. + let store = ConsensusStore::<_, ConsensusMemory<_>>::open(None).unwrap(); + // Initialize a new VM. + let vm = VM::from(store).unwrap(); + // Initialize the genesis block. 
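The `genesis_cache()` helper above is the standard lazily initialized, process-wide cache: `OnceLock` builds the map exactly once, and the `Mutex` serializes access afterwards. Note that the lock is held while the genesis block is built, which is exactly what makes concurrent callers with the same key wait for the first build. A standalone sketch of the pattern:

```rust
use std::collections::HashMap;
use std::sync::{Mutex, OnceLock};

/// Lazily initialized, process-wide cache: OnceLock guarantees the map is
/// built exactly once; the Mutex serializes access to it afterwards.
fn cache() -> &'static Mutex<HashMap<Vec<u8>, Vec<u8>>> {
    static CACHE: OnceLock<Mutex<HashMap<Vec<u8>, Vec<u8>>>> = OnceLock::new();
    CACHE.get_or_init(|| Mutex::new(HashMap::new()))
}

fn get_or_build(key: Vec<u8>, build: impl FnOnce() -> Vec<u8>) -> Vec<u8> {
    // The lock is held while `build` runs, so concurrent callers with the same
    // key block until the first build finishes (as with the genesis cache).
    cache().lock().unwrap().entry(key).or_insert_with(build).clone()
}

fn main() {
    let v = get_or_build(vec![1, 2, 3], || vec![42]);
    assert_eq!(v, vec![42]);
}
```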
+ let bonded_balances: IndexMap<_, _> = + committee.members().iter().map(|(address, (amount, _))| (*address, (*address, *address, *amount))).collect(); + vm.genesis_quorum(&genesis_private_key, committee, public_balances, bonded_balances, rng).unwrap() +} + +fn genesis_ledger( + genesis_private_key: PrivateKey, + committee: Committee, + public_balances: IndexMap, u64>, + node_id: u16, + rng: &mut (impl Rng + CryptoRng), +) -> CurrentLedger { + let cache_key = + to_bytes_le![genesis_private_key, committee, public_balances.iter().collect::>()].unwrap(); + // Initialize the genesis block on the first call; other callers + // will wait for it on the mutex. + let block = genesis_cache() + .lock() + .unwrap() + .entry(cache_key.clone()) + .or_insert_with(|| { + let hasher = BHP256::::setup("aleo.dev.block").unwrap(); + let file_name = hasher.hash(&cache_key.to_bits_le()).unwrap().to_string() + ".genesis"; + let file_path = std::env::temp_dir().join(file_name); + if file_path.exists() { + let buffer = std::fs::read(file_path).unwrap(); + return Block::from_bytes_le(&buffer).unwrap(); + } + + let block = genesis_block(genesis_private_key, committee, public_balances, rng); + std::fs::write(&file_path, block.to_bytes_le().unwrap()).unwrap(); + block + }) + .clone(); + // Initialize the ledger with the genesis block. + CurrentLedger::load(block, aleo_std::StorageMode::Development(node_id)).unwrap() +} + /// Initializes the components of the node. fn initialize_components(node_id: u16, num_nodes: u16) -> Result<(Committee, Account)> { // Ensure that the node ID is valid. @@ -266,28 +355,27 @@ fn fire_unconfirmed_solutions(sender: &PrimarySender, node_id: u // This RNG samples *different* fake solutions for each node. let mut unique_rng = rand_chacha::ChaChaRng::seed_from_u64(node_id as u64); - // A closure to generate a commitment and solution. - fn sample(mut rng: impl Rng) -> (PuzzleCommitment, Data>) { - // Sample a random fake puzzle commitment. - // TODO (howardwu): Use a mutex to bring in the real 'proof target' and change this sampling to a while loop. - let commitment = PuzzleCommitment::::from_g1_affine(rng.gen()); + // A closure to generate a solution ID and solution. + fn sample(mut rng: impl Rng) -> (SolutionID, Data>) { + // Sample a random fake solution ID. + let solution_id = rng.gen::().into(); // Sample random fake solution bytes. let solution = Data::Buffer(Bytes::from((0..1024).map(|_| rng.gen::()).collect::>())); // Return the ID and solution. - (commitment, solution) + (solution_id, solution) } // Initialize a counter. let mut counter = 0; loop { - // Sample a random fake puzzle commitment and solution. - let (commitment, solution) = + // Sample a random fake solution ID and solution. + let (solution_id, solution) = if counter % 2 == 0 { sample(&mut shared_rng) } else { sample(&mut unique_rng) }; // Initialize a callback sender and receiver. let (callback, callback_receiver) = oneshot::channel(); // Send the fake solution. 
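`genesis_ledger` above adds a disk layer on top of the in-memory cache: the cache key is hashed into a deterministic file name under the temp directory, and the serialized genesis block is reused across runs. A sketch under stated assumptions; `DefaultHasher` merely stands in for the BHP256 hash used above, and the `.genesis` suffix follows the hunk:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::io;

/// Reuse an expensive artifact from disk if its content-derived file exists;
/// otherwise build it once and persist it for the next run.
fn load_or_build(key: &[u8], build: impl FnOnce() -> Vec<u8>) -> io::Result<Vec<u8>> {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    let path = std::env::temp_dir().join(format!("{:016x}.genesis", hasher.finish()));
    if path.exists() {
        return std::fs::read(&path);
    }
    let bytes = build();
    std::fs::write(&path, &bytes)?;
    Ok(bytes)
}

fn main() -> io::Result<()> {
    let first = load_or_build(b"devnet-0", || vec![7; 8])?;
    let second = load_or_build(b"devnet-0", || unreachable!("served from disk"))?;
    assert_eq!(first, second);
    Ok(())
}
```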
- if let Err(e) = tx_unconfirmed_solution.send((commitment, solution, callback)).await { + if let Err(e) = tx_unconfirmed_solution.send((solution_id, solution, callback)).await { error!("Failed to send unconfirmed solution: {e}"); } let _ = callback_receiver.await; diff --git a/node/bft/ledger-service/Cargo.toml b/node/bft/ledger-service/Cargo.toml index 8a37cdb97e..6f83c661e4 100644 --- a/node/bft/ledger-service/Cargo.toml +++ b/node/bft/ledger-service/Cargo.toml @@ -18,8 +18,9 @@ edition = "2021" [features] default = [ ] -ledger = [ "rand", "tokio", "tracing" ] +ledger = [ "lru", "parking_lot", "rand", "tokio", "tracing" ] ledger-write = [ ] +metrics = ["dep:metrics", "snarkvm/metrics"] mock = [ "parking_lot", "tracing" ] prover = [ ] test = [ "mock", "translucent" ] @@ -32,6 +33,16 @@ version = "0.1" version = "2.1" features = [ "serde", "rayon" ] +[dependencies.lru] +version = "0.12" +optional = true + +[dependencies.metrics] +package = "snarkos-node-metrics" +path = "../../metrics" +version = "=2.2.7" +optional = true + [dependencies.parking_lot] version = "0.12" optional = true diff --git a/node/bft/ledger-service/src/ledger.rs b/node/bft/ledger-service/src/ledger.rs index a34a7f2a08..c5a500c50d 100644 --- a/node/bft/ledger-service/src/ledger.rs +++ b/node/bft/ledger-service/src/ledger.rs @@ -16,18 +16,21 @@ use crate::{fmt_id, spawn_blocking, LedgerService}; use snarkvm::{ ledger::{ block::{Block, Transaction}, - coinbase::{CoinbaseVerifyingKey, ProverSolution, PuzzleCommitment}, committee::Committee, narwhal::{BatchCertificate, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, store::ConsensusStorage, Ledger, }, - prelude::{bail, Field, Network, Result}, + prelude::{bail, Address, Field, FromBytes, Network, Result}, }; use indexmap::IndexMap; +use lru::LruCache; +use parking_lot::{Mutex, RwLock}; use std::{ fmt, + io::Read, ops::Range, sync::{ atomic::{AtomicBool, Ordering}, @@ -35,18 +38,23 @@ use std::{ }, }; +/// The capacity of the LRU holding the recently queried committees. +const COMMITTEE_CACHE_SIZE: usize = 16; + /// A core ledger service. +#[allow(clippy::type_complexity)] pub struct CoreLedgerService> { ledger: Ledger, - coinbase_verifying_key: Arc>, + committee_cache: Arc>>>, + latest_leader: Arc)>>>, shutdown: Arc, } impl> CoreLedgerService { /// Initializes a new core ledger service. pub fn new(ledger: Ledger, shutdown: Arc) -> Self { - let coinbase_verifying_key = Arc::new(ledger.coinbase_puzzle().coinbase_verifying_key().clone()); - Self { ledger, coinbase_verifying_key, shutdown } + let committee_cache = Arc::new(Mutex::new(LruCache::new(COMMITTEE_CACHE_SIZE.try_into().unwrap()))); + Self { ledger, committee_cache, latest_leader: Default::default(), shutdown } } } @@ -74,6 +82,16 @@ impl> LedgerService for CoreLedgerService< self.ledger.latest_block() } + /// Returns the latest cached leader and its associated round. + fn latest_leader(&self) -> Option<(u64, Address)> { + *self.latest_leader.read() + } + + /// Updates the latest cached leader and its associated round. + fn update_latest_leader(&self, round: u64, leader: Address) { + *self.latest_leader.write() = Some((round, leader)); + } + /// Returns `true` if the given block height exists in the ledger. fn contains_block_height(&self, height: u32) -> bool { self.ledger.contains_block_height(height).unwrap_or(false) @@ -89,6 +107,11 @@ impl> LedgerService for CoreLedgerService< self.ledger.get_hash(height) } + /// Returns the block round for the given block height, if it exists. 
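The firing loop above pairs each fake solution with a `oneshot` callback and awaits it before sending the next, so the sender observes whether the primary accepted the submission. A minimal tokio sketch of that request/acknowledge shape (the names here are illustrative, not snarkOS APIs):

```rust
use tokio::sync::{mpsc, oneshot};

#[tokio::main]
async fn main() {
    // Each request carries a oneshot sender; the consumer reports the outcome.
    let (tx, mut rx) = mpsc::channel::<(u64, oneshot::Sender<Result<(), String>>)>(16);

    tokio::spawn(async move {
        while let Some((solution_id, callback)) = rx.recv().await {
            // Validate the submission here; ack (or reject) the caller.
            let _ = callback.send(if solution_id != 0 { Ok(()) } else { Err("rejected".into()) });
        }
    });

    let (callback, callback_receiver) = oneshot::channel();
    if let Err(e) = tx.send((42, callback)).await {
        eprintln!("Failed to send unconfirmed solution: {e}");
    }
    // Await the ack before sending the next one, as the loop above does.
    let _ = callback_receiver.await;
}
```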
+ fn get_block_round(&self, height: u32) -> Result { + self.ledger.get_block(height).map(|block| block.round()) + } + /// Returns the block for the given block height. fn get_block(&self, height: u32) -> Result> { self.ledger.get_block(height) @@ -101,7 +124,7 @@ impl> LedgerService for CoreLedgerService< } /// Returns the solution for the given solution ID. - fn get_solution(&self, solution_id: &PuzzleCommitment) -> Result> { + fn get_solution(&self, solution_id: &SolutionID) -> Result> { self.ledger.get_solution(solution_id) } @@ -125,17 +148,26 @@ impl> LedgerService for CoreLedgerService< } /// Returns the committee for the given round. - /// If the given round is in the future, then the current committee is returned. fn get_committee_for_round(&self, round: u64) -> Result> { + // Check if the committee is already in the cache. + if let Some(committee) = self.committee_cache.lock().get(&round) { + return Ok(committee.clone()); + } + match self.ledger.get_committee_for_round(round)? { // Return the committee if it exists. - Some(committee) => Ok(committee), - // Return the current committee if the round is in the future. + Some(committee) => { + // Insert the committee into the cache. + self.committee_cache.lock().push(round, committee.clone()); + // Return the committee. + Ok(committee) + } + // Return the current committee if the round is equivalent. None => { // Retrieve the current committee. let current_committee = self.current_committee()?; - // Return the current committee if the round is in the future. - match current_committee.starting_round() <= round { + // Return the current committee if the round is equivalent. + match current_committee.starting_round() == round { true => Ok(current_committee), false => bail!("No committee found for round {round} in the ledger"), } @@ -143,9 +175,8 @@ impl> LedgerService for CoreLedgerService< } } - /// Returns the previous committee for the given round. - /// If the previous round is in the future, then the current committee is returned. - fn get_previous_committee_for_round(&self, round: u64) -> Result> { + /// Returns the committee lookback for the given round. + fn get_committee_lookback_for_round(&self, round: u64) -> Result> { // Get the round number for the previous committee. Note, we subtract 2 from odd rounds, // because committees are updated in even rounds. let previous_round = match round % 2 == 0 { @@ -153,8 +184,11 @@ impl> LedgerService for CoreLedgerService< false => round.saturating_sub(2), }; - // Retrieve the committee for the previous round. - self.get_committee_for_round(previous_round) + // Get the committee lookback round. + let committee_lookback_round = previous_round.saturating_sub(Committee::::COMMITTEE_LOOKBACK_RANGE); + + // Retrieve the committee for the committee lookback round. + self.get_committee_for_round(committee_lookback_round) } /// Returns `true` if the ledger contains the given certificate ID in block history. @@ -166,13 +200,13 @@ impl> LedgerService for CoreLedgerService< fn contains_transmission(&self, transmission_id: &TransmissionID) -> Result { match transmission_id { TransmissionID::Ratification => Ok(false), - TransmissionID::Solution(puzzle_commitment) => self.ledger.contains_puzzle_commitment(puzzle_commitment), + TransmissionID::Solution(solution_id) => self.ledger.contains_solution_id(solution_id), TransmissionID::Transaction(transaction_id) => self.ledger.contains_transaction_id(transaction_id), } } - /// Ensures the given transmission ID matches the given transmission. 
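`get_committee_for_round` above now consults the new `committee_cache` LRU before hitting the ledger, and inserts on a miss. A sketch of the same get-or-fetch shape using the `lru` crate; a `String` stands in for the committee type:

```rust
use lru::LruCache;
use std::num::NonZeroUsize;
use std::sync::Mutex;

const COMMITTEE_CACHE_SIZE: usize = 16;

struct CommitteeCache {
    inner: Mutex<LruCache<u64, String>>,
}

impl CommitteeCache {
    fn new() -> Self {
        Self { inner: Mutex::new(LruCache::new(NonZeroUsize::new(COMMITTEE_CACHE_SIZE).unwrap())) }
    }

    /// Return the cached committee for `round`, or fetch and remember it.
    fn get_or_fetch(&self, round: u64, fetch: impl FnOnce(u64) -> String) -> String {
        let mut cache = self.inner.lock().unwrap();
        if let Some(committee) = cache.get(&round) {
            return committee.clone();
        }
        let committee = fetch(round);
        cache.push(round, committee.clone());
        committee
    }
}

fn main() {
    let cache = CommitteeCache::new();
    let a = cache.get_or_fetch(10, |r| format!("committee@{r}"));
    let b = cache.get_or_fetch(10, |_| unreachable!("cache hit expected"));
    assert_eq!(a, b);
}
```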
- fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, transmission: &mut Transmission, @@ -180,32 +214,35 @@ impl> LedgerService for CoreLedgerService< match (transmission_id, transmission) { (TransmissionID::Ratification, Transmission::Ratification) => {} (TransmissionID::Transaction(expected_transaction_id), Transmission::Transaction(transaction_data)) => { - match transaction_data.clone().deserialize_blocking() { - Ok(transaction) => { - if transaction.id() != expected_transaction_id { - bail!( - "Received mismatching transaction ID - expected {}, found {}", - fmt_id(expected_transaction_id), - fmt_id(transaction.id()), - ); - } - - // Update the transmission with the deserialized transaction. - *transaction_data = Data::Object(transaction); - } - Err(err) => { - bail!("Failed to deserialize transaction: {err}"); - } + // Deserialize the transaction. If the transaction exceeds the maximum size, then return an error. + let transaction = match transaction_data.clone() { + Data::Object(transaction) => transaction, + Data::Buffer(bytes) => Transaction::::read_le(&mut bytes.take(N::MAX_TRANSACTION_SIZE as u64))?, + }; + // Ensure the transaction ID matches the expected transaction ID. + if transaction.id() != expected_transaction_id { + bail!( + "Received mismatching transaction ID - expected {}, found {}", + fmt_id(expected_transaction_id), + fmt_id(transaction.id()), + ); + } + // Ensure the transaction is not a fee transaction. + if transaction.is_fee() { + bail!("Received a fee transaction in a transmission"); } + + // Update the transmission with the deserialized transaction. + *transaction_data = Data::Object(transaction); } - (TransmissionID::Solution(expected_commitment), Transmission::Solution(solution_data)) => { + (TransmissionID::Solution(expected_solution_id), Transmission::Solution(solution_data)) => { match solution_data.clone().deserialize_blocking() { Ok(solution) => { - if solution.commitment() != expected_commitment { + if solution.id() != expected_solution_id { bail!( "Received mismatching solution ID - expected {}, found {}", - fmt_id(expected_commitment), - fmt_id(solution.commitment()), + fmt_id(expected_solution_id), + fmt_id(solution.id()), ); } @@ -226,30 +263,25 @@ impl> LedgerService for CoreLedgerService< } /// Checks the given solution is well-formed. - async fn check_solution_basic( - &self, - puzzle_commitment: PuzzleCommitment, - solution: Data>, - ) -> Result<()> { + async fn check_solution_basic(&self, solution_id: SolutionID, solution: Data>) -> Result<()> { // Deserialize the solution. let solution = spawn_blocking!(solution.deserialize_blocking())?; - // Ensure the puzzle commitment matches in the solution. - if puzzle_commitment != solution.commitment() { - bail!("Invalid solution - expected {puzzle_commitment}, found {}", solution.commitment()); + // Ensure the solution ID matches in the solution. + if solution_id != solution.id() { + bail!("Invalid solution - expected {solution_id}, found {}", solution.id()); } - // Retrieve the coinbase verifying key. - let coinbase_verifying_key = self.coinbase_verifying_key.clone(); - // Compute the current epoch challenge. - let epoch_challenge = self.ledger.latest_epoch_challenge()?; + // Compute the current epoch hash. + let epoch_hash = self.ledger.latest_epoch_hash()?; // Retrieve the current proof target. 
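The lookback computation above composes two steps: map the round to its previous committee round (committees update on even rounds), then step back by the lookback range so every validator elects leaders against a committee that has long since settled. A worked sketch; the even-round arm of the match and the constant's value are assumptions here, since snarkVM defines `COMMITTEE_LOOKBACK_RANGE` and the full match:

```rust
// Assumed value for the sketch; snarkVM defines the real constant on Committee.
const COMMITTEE_LOOKBACK_RANGE: u64 = 100;

fn committee_lookback_round(round: u64) -> u64 {
    // Committees update on even rounds: even rounds step back 1, odd rounds 2.
    let previous_round = match round % 2 == 0 {
        true => round.saturating_sub(1),
        false => round.saturating_sub(2),
    };
    // Then look further back so the electing committee is settled and shared.
    previous_round.saturating_sub(COMMITTEE_LOOKBACK_RANGE)
}

fn main() {
    assert_eq!(committee_lookback_round(250), 149); // 250 -> 249 -> 149
    assert_eq!(committee_lookback_round(251), 149); // 251 -> 249 -> 149
    assert_eq!(committee_lookback_round(2), 0);     // saturates at genesis
}
```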
let proof_target = self.ledger.latest_proof_target(); - // Ensure that the prover solution is valid for the given epoch. - if !spawn_blocking!(solution.verify(&coinbase_verifying_key, &epoch_challenge, proof_target))? { - bail!("Invalid prover solution '{puzzle_commitment}' for the current epoch."); + // Ensure that the solution is valid for the given epoch. + let puzzle = self.ledger.puzzle().clone(); + match spawn_blocking!(puzzle.check_solution(&solution, epoch_hash, proof_target)) { + Ok(()) => Ok(()), + Err(e) => bail!("Invalid solution '{}' for the current epoch - {e}", fmt_id(solution_id)), } - Ok(()) } /// Checks the given transaction is well-formed and unique. @@ -258,8 +290,13 @@ impl> LedgerService for CoreLedgerService< transaction_id: N::TransactionID, transaction: Data>, ) -> Result<()> { - // Deserialize the transaction. - let transaction = spawn_blocking!(transaction.deserialize_blocking())?; + // Deserialize the transaction. If the transaction exceeds the maximum size, then return an error. + let transaction = spawn_blocking!({ + match transaction { + Data::Object(transaction) => Ok(transaction), + Data::Buffer(bytes) => Ok(Transaction::::read_le(&mut bytes.take(N::MAX_TRANSACTION_SIZE as u64))?), + } + })?; // Ensure the transaction ID matches in the transaction. if transaction_id != transaction.id() { bail!("Invalid transaction - expected {transaction_id}, found {}", transaction.id()); @@ -285,7 +322,7 @@ impl> LedgerService for CoreLedgerService< subdag: Subdag, transmissions: IndexMap, Transmission>, ) -> Result> { - self.ledger.prepare_advance_to_next_quorum_block(subdag, transmissions) + self.ledger.prepare_advance_to_next_quorum_block(subdag, transmissions, &mut rand::thread_rng()) } /// Adds the given block as the next block in the ledger. @@ -297,6 +334,19 @@ impl> LedgerService for CoreLedgerService< } // Advance to the next block. self.ledger.advance_to_next_block(block)?; + // Update BFT metrics. + #[cfg(feature = "metrics")] + { + let num_sol = block.solutions().len(); + let num_tx = block.transactions().len(); + + metrics::gauge(metrics::bft::HEIGHT, block.height() as f64); + metrics::gauge(metrics::bft::LAST_COMMITTED_ROUND, block.round() as f64); + metrics::increment_gauge(metrics::blocks::SOLUTIONS, num_sol as f64); + metrics::increment_gauge(metrics::blocks::TRANSACTIONS, num_tx as f64); + metrics::update_block_metrics(block); + } + tracing::info!("\n\nAdvanced to block {} at round {} - {}\n", block.height(), block.round(), block.hash()); Ok(()) } diff --git a/node/bft/ledger-service/src/mock.rs b/node/bft/ledger-service/src/mock.rs index 5b2732370a..7a182446b6 100644 --- a/node/bft/ledger-service/src/mock.rs +++ b/node/bft/ledger-service/src/mock.rs @@ -16,11 +16,11 @@ use crate::{fmt_id, LedgerService}; use snarkvm::{ ledger::{ block::{Block, Transaction}, - coinbase::{ProverSolution, PuzzleCommitment}, committee::Committee, narwhal::{BatchCertificate, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, - prelude::{bail, ensure, Field, Network, Result}, + prelude::{bail, ensure, Address, Field, Network, Result}, }; use indexmap::IndexMap; @@ -32,22 +32,22 @@ use tracing::*; #[derive(Debug)] pub struct MockLedgerService { committee: Committee, - height_to_hash: Mutex>, + height_to_round_and_hash: Mutex>, } impl MockLedgerService { /// Initializes a new mock ledger service. 
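Both transaction paths above deserialize through `bytes.take(N::MAX_TRANSACTION_SIZE as u64)`: an oversized buffer is cut off at the bound, so the decoder fails on a truncated stream instead of performing an unbounded allocation. A sketch of the capped-reader idiom under an assumed bound:

```rust
use std::io::{self, Read};

// Assumed bound for the sketch; the real limit is N::MAX_TRANSACTION_SIZE.
const MAX_TRANSACTION_SIZE: u64 = 128 * 1024;

/// Cap how much of an untrusted buffer a decoder may consume: the capped
/// reader yields at most MAX_TRANSACTION_SIZE bytes, so an oversized payload
/// surfaces as a short, rejectable stream rather than a giant allocation.
fn read_bounded(bytes: &[u8]) -> io::Result<Vec<u8>> {
    let mut reader = bytes.take(MAX_TRANSACTION_SIZE);
    let mut payload = Vec::new();
    reader.read_to_end(&mut payload)?;
    Ok(payload)
}

fn main() -> io::Result<()> {
    let oversized = vec![0u8; 2 * MAX_TRANSACTION_SIZE as usize];
    // Only the first MAX_TRANSACTION_SIZE bytes are ever read.
    assert_eq!(read_bounded(&oversized)?.len(), MAX_TRANSACTION_SIZE as usize);
    Ok(())
}
```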
pub fn new(committee: Committee) -> Self { - Self { committee, height_to_hash: Default::default() } + Self { committee, height_to_round_and_hash: Default::default() } } /// Initializes a new mock ledger service at the specified height. pub fn new_at_height(committee: Committee, height: u32) -> Self { let mut height_to_hash = BTreeMap::new(); for i in 0..=height { - height_to_hash.insert(i, (Field::::from_u32(i)).into()); + height_to_hash.insert(i, (i as u64 * 2, Field::::from_u32(i).into())); } - Self { committee, height_to_hash: Mutex::new(height_to_hash) } + Self { committee, height_to_round_and_hash: Mutex::new(height_to_hash) } } } @@ -55,12 +55,12 @@ impl MockLedgerService { impl LedgerService for MockLedgerService { /// Returns the latest round in the ledger. fn latest_round(&self) -> u64 { - *self.height_to_hash.lock().keys().last().unwrap_or(&0) as u64 + *self.height_to_round_and_hash.lock().keys().last().unwrap_or(&0) as u64 } /// Returns the latest block height in the canonical ledger. fn latest_block_height(&self) -> u32 { - self.height_to_hash.lock().last_key_value().map(|(height, _)| *height).unwrap_or(0) + self.height_to_round_and_hash.lock().last_key_value().map(|(height, _)| *height).unwrap_or(0) } /// Returns the latest block in the ledger. @@ -68,14 +68,27 @@ impl LedgerService for MockLedgerService { unreachable!("MockLedgerService does not support latest_block") } + /// Returns the latest cached leader and its associated round. + fn latest_leader(&self) -> Option<(u64, Address)> { + None + } + + /// Updates the latest cached leader and its associated round. + fn update_latest_leader(&self, _round: u64, _leader: Address) {} + /// Returns `true` if the given block height exists in the canonical ledger. fn contains_block_height(&self, height: u32) -> bool { - self.height_to_hash.lock().contains_key(&height) + self.height_to_round_and_hash.lock().contains_key(&height) } /// Returns the canonical block height for the given block hash, if it exists. fn get_block_height(&self, hash: &N::BlockHash) -> Result { - match self.height_to_hash.lock().iter().find_map(|(height, h)| if h == hash { Some(*height) } else { None }) { + match self + .height_to_round_and_hash + .lock() + .iter() + .find_map(|(height, (_, h))| if h == hash { Some(*height) } else { None }) + { Some(height) => Ok(height), None => bail!("Missing block {hash}"), } @@ -83,8 +96,21 @@ impl LedgerService for MockLedgerService { /// Returns the canonical block hash for the given block height, if it exists. fn get_block_hash(&self, height: u32) -> Result { - match self.height_to_hash.lock().get(&height).cloned() { - Some(hash) => Ok(hash), + match self.height_to_round_and_hash.lock().get(&height).cloned() { + Some((_, hash)) => Ok(hash), + None => bail!("Missing block {height}"), + } + } + + /// Returns the block round for the given block height, if it exists. + fn get_block_round(&self, height: u32) -> Result { + match self + .height_to_round_and_hash + .lock() + .iter() + .find_map(|(h, (round, _))| if *h == height { Some(*round) } else { None }) + { + Some(round) => Ok(round), None => bail!("Missing block {height}"), } } @@ -101,7 +127,7 @@ impl LedgerService for MockLedgerService { } /// Returns the solution for the given solution ID. 
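The reworked mock above keys each height to a `(round, hash)` pair, with rounds advancing two per block; that ratio is a convention of the test fixture, not a protocol rule. The index and its reverse lookup in isolation:

```rust
use std::collections::BTreeMap;

struct MockChain {
    // height -> (round, block hash)
    height_to_round_and_hash: BTreeMap<u32, (u64, [u8; 32])>,
}

impl MockChain {
    fn new_at_height(height: u32) -> Self {
        let mut map = BTreeMap::new();
        for i in 0..=height {
            // Two rounds per block, a fixture convention mirroring the hunk above.
            map.insert(i, (i as u64 * 2, [i as u8; 32]));
        }
        Self { height_to_round_and_hash: map }
    }

    fn latest_block_height(&self) -> u32 {
        self.height_to_round_and_hash.last_key_value().map(|(h, _)| *h).unwrap_or(0)
    }

    /// Reverse lookup: recover the height for a block hash.
    fn get_block_height(&self, hash: &[u8; 32]) -> Option<u32> {
        self.height_to_round_and_hash.iter().find_map(|(h, (_, x))| (x == hash).then_some(*h))
    }
}

fn main() {
    let chain = MockChain::new_at_height(3);
    assert_eq!(chain.latest_block_height(), 3);
    assert_eq!(chain.get_block_height(&[2u8; 32]), Some(2));
}
```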
- fn get_solution(&self, _solution_id: &PuzzleCommitment) -> Result> { + fn get_solution(&self, _solution_id: &SolutionID) -> Result> { unreachable!("MockLedgerService does not support get_solution") } @@ -121,13 +147,12 @@ impl LedgerService for MockLedgerService { } /// Returns the committee for the given round. - /// If the given round is in the future, then the current committee is returned. fn get_committee_for_round(&self, _round: u64) -> Result> { Ok(self.committee.clone()) } - /// Returns the previous committee for the given round. - fn get_previous_committee_for_round(&self, _round: u64) -> Result> { + /// Returns the committee lookback for the given round. + fn get_committee_lookback_for_round(&self, _round: u64) -> Result> { Ok(self.committee.clone()) } @@ -143,8 +168,8 @@ impl LedgerService for MockLedgerService { Ok(false) } - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, _transmission: &mut Transmission, @@ -154,12 +179,8 @@ impl LedgerService for MockLedgerService { } /// Checks the given solution is well-formed. - async fn check_solution_basic( - &self, - puzzle_commitment: PuzzleCommitment, - _solution: Data>, - ) -> Result<()> { - trace!("[MockLedgerService] Check solution basic {:?} - Ok", fmt_id(puzzle_commitment)); + async fn check_solution_basic(&self, solution_id: SolutionID, _solution: Data>) -> Result<()> { + trace!("[MockLedgerService] Check solution basic {:?} - Ok", fmt_id(solution_id)); Ok(()) } @@ -197,7 +218,7 @@ impl LedgerService for MockLedgerService { block.height(), self.latest_block_height() ); - self.height_to_hash.lock().insert(block.height(), block.hash()); + self.height_to_round_and_hash.lock().insert(block.height(), (block.round(), block.hash())); Ok(()) } } diff --git a/node/bft/ledger-service/src/prover.rs b/node/bft/ledger-service/src/prover.rs index 8677f9c3d9..0038e74037 100644 --- a/node/bft/ledger-service/src/prover.rs +++ b/node/bft/ledger-service/src/prover.rs @@ -16,11 +16,11 @@ use crate::LedgerService; use snarkvm::{ ledger::{ block::{Block, Transaction}, - coinbase::{ProverSolution, PuzzleCommitment}, committee::Committee, narwhal::{BatchCertificate, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, - prelude::{bail, Field, Network, Result}, + prelude::{bail, Address, Field, Network, Result}, }; use indexmap::IndexMap; @@ -56,6 +56,16 @@ impl LedgerService for ProverLedgerService { unreachable!("Latest block does not exist in prover") } + /// Returns the latest cached leader and its associated round. + fn latest_leader(&self) -> Option<(u64, Address)> { + unreachable!("Latest leader does not exist in prover"); + } + + /// Updates the latest cached leader and its associated round. + fn update_latest_leader(&self, _round: u64, _leader: Address) { + unreachable!("Latest leader does not exist in prover"); + } + /// Returns `true` if the given block height exists in the ledger. fn contains_block_height(&self, _height: u32) -> bool { false @@ -71,6 +81,11 @@ impl LedgerService for ProverLedgerService { bail!("Block {height} does not exist in prover") } + /// Returns the block round for the given block height, if it exists. 
+ fn get_block_round(&self, height: u32) -> Result { + bail!("Block {height} does not exist in prover") + } + /// Returns the block for the given block height. fn get_block(&self, height: u32) -> Result> { bail!("Block {height} does not exist in prover") @@ -83,7 +98,7 @@ impl LedgerService for ProverLedgerService { } /// Returns the solution for the given solution ID. - fn get_solution(&self, solution_id: &PuzzleCommitment) -> Result> { + fn get_solution(&self, solution_id: &SolutionID) -> Result> { bail!("Solution '{solution_id}' does not exist in prover") } @@ -103,14 +118,12 @@ impl LedgerService for ProverLedgerService { } /// Returns the committee for the given round. - /// If the given round is in the future, then the current committee is returned. fn get_committee_for_round(&self, round: u64) -> Result> { bail!("Committee for round {round} does not exist in prover") } - /// Returns the previous committee for the given round. - /// If the previous round is in the future, then the current committee is returned. - fn get_previous_committee_for_round(&self, round: u64) -> Result> { + /// Returns the committee lookback for the given round. + fn get_committee_lookback_for_round(&self, round: u64) -> Result> { bail!("Previous committee for round {round} does not exist in prover") } @@ -124,8 +137,8 @@ impl LedgerService for ProverLedgerService { bail!("Transmission '{transmission_id}' does not exist in prover") } - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, _transmission_id: TransmissionID, _transmission: &mut Transmission, @@ -134,11 +147,7 @@ impl LedgerService for ProverLedgerService { } /// Checks the given solution is well-formed. - async fn check_solution_basic( - &self, - _puzzle_commitment: PuzzleCommitment, - _solution: Data>, - ) -> Result<()> { + async fn check_solution_basic(&self, _solution_id: SolutionID, _solution: Data>) -> Result<()> { Ok(()) } diff --git a/node/bft/ledger-service/src/traits.rs b/node/bft/ledger-service/src/traits.rs index 82a546d1ba..d8477c3a21 100644 --- a/node/bft/ledger-service/src/traits.rs +++ b/node/bft/ledger-service/src/traits.rs @@ -15,11 +15,11 @@ use snarkvm::{ ledger::{ block::{Block, Transaction}, - coinbase::{ProverSolution, PuzzleCommitment}, committee::Committee, narwhal::{BatchCertificate, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, - prelude::{Field, Network, Result}, + prelude::{Address, Field, Network, Result}, }; use indexmap::IndexMap; @@ -36,6 +36,12 @@ pub trait LedgerService: Debug + Send + Sync { /// Returns the latest block in the ledger. fn latest_block(&self) -> Block; + /// Returns the latest cached leader and its associated round. + fn latest_leader(&self) -> Option<(u64, Address)>; + + /// Updates the latest cached leader and its associated round. + fn update_latest_leader(&self, round: u64, leader: Address); + /// Returns `true` if the given block height exists in the ledger. fn contains_block_height(&self, height: u32) -> bool; @@ -45,6 +51,9 @@ pub trait LedgerService: Debug + Send + Sync { /// Returns the block hash for the given block height, if it exists. fn get_block_hash(&self, height: u32) -> Result; + /// Returns the block round for the given block height, if it exists. + fn get_block_round(&self, height: u32) -> Result; + /// Returns the block for the given block height. 
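`ProverLedgerService` above implements the same trait as the full ledger but holds no chain state, so membership checks answer `false` and lookups fail fast with descriptive errors rather than guessing. The stub shape in miniature (`anyhow` stands in for snarkVM's error plumbing):

```rust
use anyhow::{bail, Result};

trait LedgerLike {
    fn contains_block_height(&self, height: u32) -> bool;
    fn get_block_round(&self, height: u32) -> Result<u64>;
}

/// A prover keeps no chain state: negative answers for membership,
/// descriptive errors for lookups.
struct ProverStub;

impl LedgerLike for ProverStub {
    fn contains_block_height(&self, _height: u32) -> bool {
        false
    }

    fn get_block_round(&self, height: u32) -> Result<u64> {
        bail!("Block {height} does not exist in prover")
    }
}

fn main() {
    let prover = ProverStub;
    assert!(!prover.contains_block_height(7));
    assert!(prover.get_block_round(7).is_err());
}
```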
fn get_block(&self, height: u32) -> Result>; @@ -53,7 +62,7 @@ pub trait LedgerService: Debug + Send + Sync { fn get_blocks(&self, heights: Range) -> Result>>; /// Returns the solution for the given solution ID. - fn get_solution(&self, solution_id: &PuzzleCommitment) -> Result>; + fn get_solution(&self, solution_id: &SolutionID) -> Result>; /// Returns the unconfirmed transaction for the given transaction ID. fn get_unconfirmed_transaction(&self, transaction_id: N::TransactionID) -> Result>; @@ -65,12 +74,10 @@ pub trait LedgerService: Debug + Send + Sync { fn current_committee(&self) -> Result>; /// Returns the committee for the given round. - /// If the given round is in the future, then the current committee is returned. fn get_committee_for_round(&self, round: u64) -> Result>; - /// Returns the previous committee for the given round. - /// If the previous round is in the future, then the current committee is returned. - fn get_previous_committee_for_round(&self, round: u64) -> Result>; + /// Returns the committee lookback for the given round. + fn get_committee_lookback_for_round(&self, round: u64) -> Result>; /// Returns `true` if the ledger contains the given certificate ID. fn contains_certificate(&self, certificate_id: &Field) -> Result; @@ -78,19 +85,15 @@ pub trait LedgerService: Debug + Send + Sync { /// Returns `true` if the ledger contains the given transmission ID. fn contains_transmission(&self, transmission_id: &TransmissionID) -> Result; - /// Ensures the given transmission ID matches the given transmission. - fn ensure_transmission_id_matches( + /// Ensures that the given transmission is not a fee and matches the given transmission ID. + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, transmission: &mut Transmission, ) -> Result<()>; /// Checks the given solution is well-formed. - async fn check_solution_basic( - &self, - puzzle_commitment: PuzzleCommitment, - solution: Data>, - ) -> Result<()>; + async fn check_solution_basic(&self, solution_id: SolutionID, solution: Data>) -> Result<()>; /// Checks the given transaction is well-formed and unique. async fn check_transaction_basic( diff --git a/node/bft/ledger-service/src/translucent.rs b/node/bft/ledger-service/src/translucent.rs index cf327a173f..8a5baf6422 100644 --- a/node/bft/ledger-service/src/translucent.rs +++ b/node/bft/ledger-service/src/translucent.rs @@ -18,13 +18,13 @@ use indexmap::IndexMap; use snarkvm::{ ledger::{ block::{Block, Transaction}, - coinbase::{ProverSolution, PuzzleCommitment}, committee::Committee, narwhal::{Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, store::ConsensusStorage, Ledger, }, - prelude::{narwhal::BatchCertificate, Field, Network, Result}, + prelude::{narwhal::BatchCertificate, Address, Field, Network, Result}, }; use std::{ fmt, @@ -67,6 +67,16 @@ impl> LedgerService for TranslucentLedgerS self.inner.latest_block() } + /// Returns the latest cached leader and its associated round. + fn latest_leader(&self) -> Option<(u64, Address)> { + self.inner.latest_leader() + } + + /// Updates the latest cached leader and its associated round. + fn update_latest_leader(&self, round: u64, leader: Address) { + self.inner.update_latest_leader(round, leader); + } + /// Returns `true` if the given block height exists in the ledger. 
fn contains_block_height(&self, height: u32) -> bool { self.inner.contains_block_height(height) @@ -82,6 +92,11 @@ impl> LedgerService for TranslucentLedgerS self.inner.get_block_hash(height) } + /// Returns the block round for the given block height, if it exists. + fn get_block_round(&self, height: u32) -> Result { + self.inner.get_block_round(height) + } + /// Returns the block for the given block height. fn get_block(&self, height: u32) -> Result> { self.inner.get_block(height) @@ -94,7 +109,7 @@ impl> LedgerService for TranslucentLedgerS } /// Returns the solution for the given solution ID. - fn get_solution(&self, solution_id: &PuzzleCommitment) -> Result> { + fn get_solution(&self, solution_id: &SolutionID) -> Result> { self.inner.get_solution(solution_id) } @@ -114,13 +129,13 @@ impl> LedgerService for TranslucentLedgerS } /// Returns the committee for the given round. - /// If the given round is in the future, then the current committee is returned. fn get_committee_for_round(&self, round: u64) -> Result> { self.inner.get_committee_for_round(round) } - fn get_previous_committee_for_round(&self, round: u64) -> Result> { - self.inner.get_previous_committee_for_round(round) + /// Returns the committee lookback for the given round. + fn get_committee_lookback_for_round(&self, round: u64) -> Result> { + self.inner.get_committee_lookback_for_round(round) } /// Returns `true` if the ledger contains the given certificate ID in block history. @@ -134,7 +149,7 @@ impl> LedgerService for TranslucentLedgerS } /// Always succeeds. - fn ensure_transmission_id_matches( + fn ensure_transmission_is_well_formed( &self, _transmission_id: TransmissionID, _transmission: &mut Transmission, @@ -143,11 +158,7 @@ impl> LedgerService for TranslucentLedgerS } /// Always succeeds. - async fn check_solution_basic( - &self, - _puzzle_commitment: PuzzleCommitment, - _solution: Data>, - ) -> Result<()> { + async fn check_solution_basic(&self, _solution_id: SolutionID, _solution: Data>) -> Result<()> { Ok(()) } diff --git a/node/bft/src/bft.rs b/node/bft/src/bft.rs index 5637ddfd3e..fe3937e89e 100644 --- a/node/bft/src/bft.rs +++ b/node/bft/src/bft.rs @@ -33,9 +33,9 @@ use snarkvm::{ console::account::Address, ledger::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, committee::Committee, narwhal::{BatchCertificate, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, prelude::{bail, ensure, Field, Network, Result}, }; @@ -67,8 +67,6 @@ pub struct BFT { leader_certificate: Arc>>>, /// The timer for the leader certificate to be received. leader_certificate_timer: Arc, - /// The last election certificate IDs. - last_election_certificate_ids: Arc>>>, /// The consensus sender. consensus_sender: Arc>>, /// The spawned handles. @@ -92,7 +90,6 @@ impl BFT { dag: Default::default(), leader_certificate: Default::default(), leader_certificate_timer: Default::default(), - last_election_certificate_ids: Default::default(), consensus_sender: Default::default(), handles: Default::default(), lock: Default::default(), @@ -121,6 +118,11 @@ impl BFT { Ok(()) } + /// Returns `true` if the primary is synced. + pub fn is_synced(&self) -> bool { + self.primary.is_synced() + } + /// Returns the primary. pub const fn primary(&self) -> &Primary { &self.primary @@ -145,11 +147,6 @@ impl BFT { pub const fn leader_certificate(&self) -> &Arc>>> { &self.leader_certificate } - - /// Returns the last election certificate IDs. 
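`TranslucentLedgerService` above is a thin decorator: the new accessors (`latest_leader`, `update_latest_leader`, `get_block_round`, `get_committee_lookback_for_round`) all forward to the wrapped inner service, while the validity checks are overridden to always succeed, which is what makes it useful for devnets and tests. The pattern in miniature:

```rust
trait LedgerLike {
    fn get_block_round(&self, height: u32) -> u64;
    fn check_solution_basic(&self) -> Result<(), String>;
}

struct Inner;

impl LedgerLike for Inner {
    fn get_block_round(&self, height: u32) -> u64 {
        u64::from(height) * 2
    }

    fn check_solution_basic(&self) -> Result<(), String> {
        Err("real validation would run here".into())
    }
}

/// Decorator: state queries delegate to the inner service; validation is
/// relaxed to always succeed.
struct Translucent<L: LedgerLike>(L);

impl<L: LedgerLike> LedgerLike for Translucent<L> {
    fn get_block_round(&self, height: u32) -> u64 {
        self.0.get_block_round(height)
    }

    fn check_solution_basic(&self) -> Result<(), String> {
        Ok(())
    }
}

fn main() {
    let service = Translucent(Inner);
    assert_eq!(service.get_block_round(3), 6);
    assert!(service.check_solution_basic().is_ok());
}
```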
- pub fn last_election_certificate_ids(&self) -> IndexSet> { - self.last_election_certificate_ids.read().clone() - } } impl BFT { @@ -175,24 +172,24 @@ impl BFT { } impl BFT { - /// Returns the unconfirmed transmission IDs. - pub fn unconfirmed_transmission_ids(&self) -> impl '_ + Iterator> { - self.primary.unconfirmed_transmission_ids() + /// Returns the worker transmission IDs. + pub fn worker_transmission_ids(&self) -> impl '_ + Iterator> { + self.primary.worker_transmission_ids() } - /// Returns the unconfirmed transmissions. - pub fn unconfirmed_transmissions(&self) -> impl '_ + Iterator, Transmission)> { - self.primary.unconfirmed_transmissions() + /// Returns the worker transmissions. + pub fn worker_transmissions(&self) -> impl '_ + Iterator, Transmission)> { + self.primary.worker_transmissions() } - /// Returns the unconfirmed solutions. - pub fn unconfirmed_solutions(&self) -> impl '_ + Iterator, Data>)> { - self.primary.unconfirmed_solutions() + /// Returns the worker solutions. + pub fn worker_solutions(&self) -> impl '_ + Iterator, Data>)> { + self.primary.worker_solutions() } - /// Returns the unconfirmed transactions. - pub fn unconfirmed_transactions(&self) -> impl '_ + Iterator>)> { - self.primary.unconfirmed_transactions() + /// Returns the worker transactions. + pub fn worker_transactions(&self) -> impl '_ + Iterator>)> { + self.primary.worker_transactions() } } @@ -202,7 +199,9 @@ impl BFT { // Ensure the current round is at least the storage round (this is a sanity check). let storage_round = self.storage().current_round(); if current_round < storage_round { - warn!("BFT is safely skipping an update for round {current_round}, as storage is at round {storage_round}"); + debug!( + "BFT is safely skipping an update for round {current_round}, as storage is at round {storage_round}" + ); return false; } @@ -229,7 +228,7 @@ impl BFT { if let Some(leader_certificate) = self.leader_certificate.read().as_ref() { // Ensure the state of the leader certificate is consistent with the BFT being ready. if !is_ready { - error!(is_ready, "BFT - A leader certificate was found, but 'is_ready' is false"); + trace!(is_ready, "BFT - A leader certificate was found, but 'is_ready' is false"); } // Log the leader election. let leader_round = leader_certificate.round(); @@ -254,6 +253,7 @@ impl BFT { // Update to the next round in storage. if let Err(e) = self.storage().increment_to_next_round(current_round) { warn!("BFT failed to increment to the next round from round {current_round} - {e}"); + return false; } // Update the timer for the leader certificate. self.leader_certificate_timer.store(now(), Ordering::SeqCst); @@ -291,39 +291,56 @@ impl BFT { return false; } - // Retrieve the previous committee of the current round. - let previous_committee = match self.ledger().get_previous_committee_for_round(current_round) { + // Retrieve the committee lookback of the current round. + let committee_lookback = match self.ledger().get_committee_lookback_for_round(current_round) { Ok(committee) => committee, Err(e) => { - error!("BFT failed to retrieve the previous committee for the even round {current_round} - {e}"); + error!("BFT failed to retrieve the committee lookback for the even round {current_round} - {e}"); return false; } }; // Determine the leader of the current round. 
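The leader resolution that follows goes through the new `(round, leader)` cache before falling back to `get_leader`: read the cache, compare rounds, and compute-then-write-back on a miss. Leader election is deterministic per round, so only the most recent pair needs to be kept. A sketch of that shape behind an `RwLock`:

```rust
use std::sync::RwLock;

/// Remember only the most recent (round, leader) pair: repeated queries for
/// the same round skip the leader election entirely.
struct LeaderCache {
    latest_leader: RwLock<Option<(u64, String)>>, // String stands in for Address<N>
}

impl LeaderCache {
    fn leader_for(&self, round: u64, compute: impl FnOnce(u64) -> String) -> String {
        if let Some((cached_round, cached_leader)) = self.latest_leader.read().unwrap().clone() {
            if cached_round == round {
                return cached_leader;
            }
        }
        let computed_leader = compute(round);
        *self.latest_leader.write().unwrap() = Some((round, computed_leader.clone()));
        computed_leader
    }
}

fn main() {
    let cache = LeaderCache { latest_leader: RwLock::new(None) };
    let a = cache.leader_for(8, |r| format!("leader@{r}"));
    let b = cache.leader_for(8, |_| unreachable!("cache hit expected"));
    assert_eq!(a, b);
}
```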
- let leader = match previous_committee.get_leader(current_round) { - Ok(leader) => leader, - Err(e) => { - error!("BFT failed to compute the leader for the even round {current_round} - {e}"); - return false; + let leader = match self.ledger().latest_leader() { + Some((cached_round, cached_leader)) if cached_round == current_round => cached_leader, + _ => { + // Compute the leader for the current round. + let computed_leader = match committee_lookback.get_leader(current_round) { + Ok(leader) => leader, + Err(e) => { + error!("BFT failed to compute the leader for the even round {current_round} - {e}"); + return false; + } + }; + + // Cache the computed leader. + self.ledger().update_latest_leader(current_round, computed_leader); + + computed_leader } }; // Find and set the leader certificate, if the leader was present in the current even round. let leader_certificate = current_certificates.iter().find(|certificate| certificate.author() == leader); *self.leader_certificate.write() = leader_certificate.cloned(); - self.is_even_round_ready_for_next_round(current_certificates, previous_committee, current_round) + self.is_even_round_ready_for_next_round(current_certificates, committee_lookback, current_round) } - /// Returns 'true' under one of the following conditions: - /// - If the leader certificate is set for the current even round, - /// - The timer for the leader certificate has expired, and we can - /// achieve quorum threshold (2f + 1) without the leader. + /// Returns 'true' if the quorum threshold `(2f + 1)` is reached for this round under one of the following conditions: + /// - If the leader certificate is set for the current even round. + /// - The timer for the leader certificate has expired. fn is_even_round_ready_for_next_round( &self, certificates: IndexSet>, committee: Committee, current_round: u64, ) -> bool { + // Retrieve the authors for the current round. + let authors = certificates.into_iter().map(|c| c.author()).collect(); + // Check if quorum threshold is reached. + if !committee.is_quorum_threshold_reached(&authors) { + trace!("BFT failed to reach quorum threshold in even round {current_round}"); + return false; + } // If the leader certificate is set for the current even round, return 'true'. if let Some(leader_certificate) = self.leader_certificate.read().as_ref() { if leader_certificate.round() == current_round { @@ -332,11 +349,8 @@ impl BFT { } // If the timer has expired, and we can achieve quorum threshold (2f + 1) without the leader, return 'true'. if self.is_timer_expired() { - debug!("BFT (timer expired) - Checking for quorum threshold (without the leader)"); - // Retrieve the certificate authors. - let authors = certificates.into_iter().map(|c| c.author()).collect(); - // Determine if the quorum threshold is reached. - return committee.is_quorum_threshold_reached(&authors); + debug!("BFT (timer expired) - Advancing from round {current_round} to the next round (without the leader)"); + return true; } // Otherwise, return 'false'. false @@ -347,9 +361,8 @@ impl BFT { self.leader_certificate_timer.load(Ordering::SeqCst) + MAX_LEADER_CERTIFICATE_DELAY_IN_SECS <= now() } - /// Returns 'true' if any of the following conditions hold: - /// - The leader certificate is 'None'. - /// - The leader certificate reached quorum threshold `(2f + 1)` (in the previous certificates in the current round). + /// Returns 'true' if the quorum threshold `(2f + 1)` is reached for this round under one of the following conditions: + /// - The leader certificate is `None`. 
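`is_even_round_ready_for_next_round` above now refuses to advance until the certificate authors reach the stake-weighted quorum threshold, whether or not the leader certificate arrived or the timer expired. A sketch of that gate; the strictly-more-than-2/3 arithmetic is the usual `2f + 1` analogue, with the exact rounding left to snarkVM's `Committee`:

```rust
use std::collections::{HashMap, HashSet};

struct Committee {
    stakes: HashMap<&'static str, u64>,
}

impl Committee {
    fn quorum_threshold(&self) -> u64 {
        // Strictly more than 2/3 of the total stake (the 2f + 1 analogue).
        let total: u64 = self.stakes.values().sum();
        2 * total / 3 + 1
    }

    fn is_quorum_threshold_reached(&self, authors: &HashSet<&str>) -> bool {
        let stake: u64 = authors.iter().filter_map(|a| self.stakes.get(a)).sum();
        stake >= self.quorum_threshold()
    }
}

fn main() {
    let committee = Committee { stakes: HashMap::from([("a", 25), ("b", 25), ("c", 25), ("d", 25)]) };
    // Three of four equal-stake validators meet the threshold; two do not.
    assert!(committee.is_quorum_threshold_reached(&HashSet::from(["a", "b", "c"])));
    assert!(!committee.is_quorum_threshold_reached(&HashSet::from(["a", "b"])));
}
```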
/// - The leader certificate is not included up to availability threshold `(f + 1)` (in the previous certificates of the current round). /// - The leader certificate timer has expired. fn is_leader_quorum_or_nonleaders_available(&self, odd_round: u64) -> bool { @@ -365,31 +378,37 @@ impl BFT { error!("BFT does not compute stakes for the leader certificate in an even round"); return false; } - - // Retrieve the leader certificate. - let Some(leader_certificate) = self.leader_certificate.read().clone() else { - // If there is no leader certificate for the previous round, return 'true'. - return true; - }; - // Retrieve the leader certificate ID. - let leader_certificate_id = leader_certificate.id(); // Retrieve the certificates for the current round. let current_certificates = self.storage().get_certificates_for_round(current_round); - // Retrieve the previous committee of the current round. - let previous_committee = match self.ledger().get_previous_committee_for_round(current_round) { + // Retrieve the committee lookback for the current round. + let committee_lookback = match self.ledger().get_committee_lookback_for_round(current_round) { Ok(committee) => committee, Err(e) => { - error!("BFT failed to retrieve the previous committee for the odd round {current_round} - {e}"); + error!("BFT failed to retrieve the committee lookback for the odd round {current_round} - {e}"); return false; } }; - + // Retrieve the authors of the current certificates. + let authors = current_certificates.clone().into_iter().map(|c| c.author()).collect(); + // Check if quorum threshold is reached. + if !committee_lookback.is_quorum_threshold_reached(&authors) { + trace!("BFT failed reach quorum threshold in odd round {current_round}. "); + return false; + } + // Retrieve the leader certificate. + let Some(leader_certificate) = self.leader_certificate.read().clone() else { + // If there is no leader certificate for the previous round, return 'true'. + return true; + }; // Compute the stake for the leader certificate. - let (stake_with_leader, stake_without_leader) = - self.compute_stake_for_leader_certificate(leader_certificate_id, current_certificates, &previous_committee); + let (stake_with_leader, stake_without_leader) = self.compute_stake_for_leader_certificate( + leader_certificate.id(), + current_certificates, + &committee_lookback, + ); // Return 'true' if any of the following conditions hold: - stake_with_leader >= previous_committee.availability_threshold() - || stake_without_leader >= previous_committee.quorum_threshold() + stake_with_leader >= committee_lookback.availability_threshold() + || stake_without_leader >= committee_lookback.quorum_threshold() || self.is_timer_expired() } @@ -428,7 +447,10 @@ impl BFT { impl BFT { /// Stores the certificate in the DAG, and attempts to commit one or more anchors. - async fn update_dag(&self, certificate: BatchCertificate) -> Result<()> { + async fn update_dag( + &self, + certificate: BatchCertificate, + ) -> Result<()> { // Acquire the BFT lock. let _lock = self.lock.lock().await; @@ -448,14 +470,30 @@ impl BFT { return Ok(()); } - // Retrieve the previous committee for the commit round. - let Ok(previous_committee) = self.ledger().get_previous_committee_for_round(commit_round) else { - bail!("BFT failed to retrieve the committee for commit round {commit_round}"); + /* Proceeding to check if the leader is ready to be committed. 
*/ + trace!("Checking if the leader is ready to be committed for round {commit_round}..."); + + // Retrieve the committee lookback for the commit round. + let Ok(committee_lookback) = self.ledger().get_committee_lookback_for_round(commit_round) else { + bail!("BFT failed to retrieve the committee with lag for commit round {commit_round}"); }; - // Compute the leader for the commit round. - let Ok(leader) = previous_committee.get_leader(commit_round) else { - bail!("BFT failed to compute the leader for commit round {commit_round}"); + + // Either retrieve the cached leader or compute it. + let leader = match self.ledger().latest_leader() { + Some((cached_round, cached_leader)) if cached_round == commit_round => cached_leader, + _ => { + // Compute the leader for the commit round. + let Ok(computed_leader) = committee_lookback.get_leader(commit_round) else { + bail!("BFT failed to compute the leader for commit round {commit_round}"); + }; + + // Cache the computed leader. + self.ledger().update_latest_leader(commit_round, computed_leader); + + computed_leader + } }; + // Retrieve the leader certificate for the commit round. let Some(leader_certificate) = self.dag.read().get_certificate_for_round_with_author(commit_round, leader) else { @@ -476,116 +514,165 @@ impl BFT { }) .collect(); // Check if the leader is ready to be committed. - if !previous_committee.is_availability_threshold_reached(&authors) { + if !committee_lookback.is_availability_threshold_reached(&authors) { // If the leader is not ready to be committed, return early. trace!("BFT is not ready to commit {commit_round}"); return Ok(()); } /* Proceeding to commit the leader. */ - info!("Proceeding to commit round {commit_round} with leader {leader}..."); + info!("Proceeding to commit round {commit_round} with leader '{}'", fmt_id(leader)); - // Prepare the election certificate IDs. - let election_certificate_ids = certificates.values().map(|c| c.id()).collect::>(); // Commit the leader certificate, and all previous leader certificates since the last committed round. - self.commit_leader_certificate::(leader_certificate, election_certificate_ids).await + self.commit_leader_certificate::(leader_certificate).await } /// Commits the leader certificate, and all previous leader certificates since the last committed round. async fn commit_leader_certificate( &self, leader_certificate: BatchCertificate, - election_certificate_ids: IndexSet>, ) -> Result<()> { - // Retrieve the leader certificate round. - let leader_round = leader_certificate.round(); - // Compute the commit subdag. - let commit_subdag = match self.order_dag_with_dfs::(leader_certificate) { - Ok(subdag) => subdag, - Err(e) => bail!("BFT failed to order the DAG with DFS - {e}"), - }; - // Initialize a map for the deduped transmissions. - let mut transmissions = IndexMap::new(); - // Start from the oldest leader certificate. - for certificate in commit_subdag.values().flatten() { - // Update the DAG. - if IS_SYNCING { - self.dag.write().commit(certificate, self.storage().max_gc_rounds()); - } - // Retrieve the transmissions. - for transmission_id in certificate.transmission_ids() { - // If the transmission already exists in the map, skip it. - if transmissions.contains_key(transmission_id) { - continue; - } - // If the transmission already exists in the ledger, skip it. - // Note: On failure to read from the ledger, we skip including this transmission, out of safety. - if self.ledger().contains_transmission(transmission_id).unwrap_or(true) { + // Fetch the leader round. 
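`update_dag` above commits the leader only once the next-round certificates that reference it reach the availability threshold, the `f + 1` analogue: just over a third of the stake guarantees at least one honest validator vouches for the leader. The check in isolation, with illustrative rounding:

```rust
/// Availability threshold: more than 1/3 of total stake (the f + 1 analogue).
/// Exact rounding follows snarkVM's Committee, not this sketch.
fn is_availability_threshold_reached(total_stake: u64, voting_stake: u64) -> bool {
    voting_stake >= total_stake / 3 + 1
}

fn main() {
    assert!(is_availability_threshold_reached(100, 34));
    assert!(!is_availability_threshold_reached(100, 33));
}
```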
+ let latest_leader_round = leader_certificate.round(); + // Determine the list of all previous leader certificates since the last committed round. + // The order of the leader certificates is from **newest** to **oldest**. + let mut leader_certificates = vec![leader_certificate.clone()]; + { + // Retrieve the leader round. + let leader_round = leader_certificate.round(); + + let mut current_certificate = leader_certificate; + for round in (self.dag.read().last_committed_round() + 2..=leader_round.saturating_sub(2)).rev().step_by(2) + { + // Retrieve the previous committee for the leader round. + let previous_committee_lookback = match self.ledger().get_committee_lookback_for_round(round) { + Ok(committee) => committee, + Err(e) => { + bail!("BFT failed to retrieve a previous committee lookback for the even round {round} - {e}"); + } + }; + // Either retrieve the cached leader or compute it. + let leader = match self.ledger().latest_leader() { + Some((cached_round, cached_leader)) if cached_round == round => cached_leader, + _ => { + // Compute the leader for the commit round. + let computed_leader = match previous_committee_lookback.get_leader(round) { + Ok(leader) => leader, + Err(e) => { + bail!("BFT failed to compute the leader for the even round {round} - {e}"); + } + }; + + // Cache the computed leader. + self.ledger().update_latest_leader(round, computed_leader); + + computed_leader + } + }; + // Retrieve the previous leader certificate. + let Some(previous_certificate) = self.dag.read().get_certificate_for_round_with_author(round, leader) + else { continue; - } - // Retrieve the transmission. - let Some(transmission) = self.storage().get_transmission(*transmission_id) else { - bail!( - "BFT failed to retrieve transmission '{}' from round {}", - fmt_id(transmission_id), - certificate.round() - ); }; - // Add the transmission to the set. - transmissions.insert(*transmission_id, transmission); + // Determine if there is a path between the previous certificate and the current certificate. + if self.is_linked(previous_certificate.clone(), current_certificate.clone())? { + // Add the previous leader certificate to the list of certificates to commit. + leader_certificates.push(previous_certificate.clone()); + // Update the current certificate to the previous leader certificate. + current_certificate = previous_certificate; + } } } - // If the node is not syncing, trigger consensus, as this will build a new block for the ledger. - if !IS_SYNCING { - // Construct the subdag. - let subdag = Subdag::from(commit_subdag.clone(), election_certificate_ids.clone())?; - // Retrieve the anchor round. - let anchor_round = subdag.anchor_round(); - // Retrieve the number of transmissions. - let num_transmissions = transmissions.len(); - // Retrieve metadata about the subdag. - let subdag_metadata = subdag.iter().map(|(round, c)| (*round, c.len())).collect::>(); - - // Ensure the subdag anchor round matches the leader round. - ensure!( - anchor_round == leader_round, - "BFT failed to commit - the subdag anchor round {anchor_round} does not match the leader round {leader_round}", - ); - // Trigger consensus. - if let Some(consensus_sender) = self.consensus_sender.get() { - // Initialize a callback sender and receiver. - let (callback_sender, callback_receiver) = oneshot::channel(); - // Send the subdag and transmissions to consensus. - consensus_sender.tx_consensus_subdag.send((subdag, transmissions, callback_sender)).await?; - // Await the callback to continue. 
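`commit_leader_certificate` above no longer commits a single anchor: it walks every even round between the last committed round and the new leader round, newest first, keeping each leader certificate that `is_linked` to the chain being committed so no anchor is skipped. The round enumeration on its own:

```rust
/// Even leader rounds between the last committed round and the newest leader
/// round, newest first, matching the `.rev().step_by(2)` walk above.
fn uncommitted_leader_rounds(last_committed_round: u64, leader_round: u64) -> Vec<u64> {
    (last_committed_round + 2..=leader_round.saturating_sub(2)).rev().step_by(2).collect()
}

fn main() {
    // Last committed at round 4, new leader at round 12: revisit 10, 8, then 6.
    assert_eq!(uncommitted_leader_rounds(4, 12), vec![10, 8, 6]);
    // Adjacent leader rounds leave nothing in between.
    assert_eq!(uncommitted_leader_rounds(10, 12), Vec::<u64>::new());
}
```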
- match callback_receiver.await { - Ok(Ok(())) => (), // continue - Ok(Err(e)) => { - error!("BFT failed to advance the subdag for round {anchor_round} - {e}"); - return Ok(()); + // Iterate over the leader certificates to commit. + for leader_certificate in leader_certificates.into_iter().rev() { + // Retrieve the leader certificate round. + let leader_round = leader_certificate.round(); + // Compute the commit subdag. + let commit_subdag = match self.order_dag_with_dfs::(leader_certificate) { + Ok(subdag) => subdag, + Err(e) => bail!("BFT failed to order the DAG with DFS - {e}"), + }; + // If the node is not syncing, trigger consensus, as this will build a new block for the ledger. + if !IS_SYNCING { + // Initialize a map for the deduped transmissions. + let mut transmissions = IndexMap::new(); + // Start from the oldest leader certificate. + for certificate in commit_subdag.values().flatten() { + // Retrieve the transmissions. + for transmission_id in certificate.transmission_ids() { + // If the transmission already exists in the map, skip it. + if transmissions.contains_key(transmission_id) { + continue; + } + // If the transmission already exists in the ledger, skip it. + // Note: On failure to read from the ledger, we skip including this transmission, out of safety. + if self.ledger().contains_transmission(transmission_id).unwrap_or(true) { + continue; + } + // Retrieve the transmission. + let Some(transmission) = self.storage().get_transmission(*transmission_id) else { + bail!( + "BFT failed to retrieve transmission '{}' from round {}", + fmt_id(transmission_id), + certificate.round() + ); + }; + // Add the transmission to the set. + transmissions.insert(*transmission_id, transmission); } - Err(e) => { - error!("BFT failed to receive the callback for round {anchor_round} - {e}"); - return Ok(()); + } + // Trigger consensus, as this will build a new block for the ledger. + // Construct the subdag. + let subdag = Subdag::from(commit_subdag.clone())?; + // Retrieve the anchor round. + let anchor_round = subdag.anchor_round(); + // Retrieve the number of transmissions. + let num_transmissions = transmissions.len(); + // Retrieve metadata about the subdag. + let subdag_metadata = subdag.iter().map(|(round, c)| (*round, c.len())).collect::>(); + + // Ensure the subdag anchor round matches the leader round. + ensure!( + anchor_round == leader_round, + "BFT failed to commit - the subdag anchor round {anchor_round} does not match the leader round {leader_round}", + ); + + // Trigger consensus. + if let Some(consensus_sender) = self.consensus_sender.get() { + // Initialize a callback sender and receiver. + let (callback_sender, callback_receiver) = oneshot::channel(); + // Send the subdag and transmissions to consensus. + consensus_sender.tx_consensus_subdag.send((subdag, transmissions, callback_sender)).await?; + // Await the callback to continue. + match callback_receiver.await { + Ok(Ok(())) => (), // continue + Ok(Err(e)) => { + error!("BFT failed to advance the subdag for round {anchor_round} - {e}"); + return Ok(()); + } + Err(e) => { + error!("BFT failed to receive the callback for round {anchor_round} - {e}"); + return Ok(()); + } } } + + info!( + "\n\nCommitting a subdag from round {anchor_round} with {num_transmissions} transmissions: {subdag_metadata:?}\n" + ); } - info!( - "\n\nCommitting a subdag from round {anchor_round} with {num_transmissions} transmissions: {subdag_metadata:?}\n" - ); // Update the DAG, as the subdag was successfully included into a block. 
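Within each committed subdag, the collection loop above dedupes transmissions and skips anything the ledger already contains; on a failed ledger read, the transmission is skipped out of safety rather than risked as a double-include. A sketch of that filtering (`fetch` stands in for the storage lookup, which in the real code is a hard error when it misses):

```rust
use std::collections::{BTreeMap, BTreeSet};

/// Collect the transmissions of a commit subdag exactly once each, skipping
/// IDs the ledger already contains.
fn collect_transmissions(
    certificates: &[Vec<u64>],
    ledger: &BTreeSet<u64>,
    fetch: impl Fn(u64) -> Option<Vec<u8>>,
) -> BTreeMap<u64, Vec<u8>> {
    let mut transmissions = BTreeMap::new();
    for transmission_ids in certificates {
        for &id in transmission_ids {
            // Skip duplicates within the subdag and anything already on chain.
            if transmissions.contains_key(&id) || ledger.contains(&id) {
                continue;
            }
            if let Some(data) = fetch(id) {
                transmissions.insert(id, data);
            }
        }
    }
    transmissions
}

fn main() {
    let ledger = BTreeSet::from([2]);
    let out = collect_transmissions(&[vec![1, 2], vec![1, 3]], &ledger, |id| Some(vec![id as u8]));
    assert_eq!(out.keys().copied().collect::<Vec<_>>(), vec![1, 3]);
}
```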
let mut dag_write = self.dag.write(); for certificate in commit_subdag.values().flatten() { dag_write.commit(certificate, self.storage().max_gc_rounds()); } } - // Update the last election certificate IDs. - { - let mut last_election_certificate_ids = self.last_election_certificate_ids.write(); - *last_election_certificate_ids = election_certificate_ids; - } + + // Perform garbage collection based on the latest committed leader round. + self.storage().garbage_collect_certificates(latest_leader_round); + Ok(()) } @@ -622,6 +709,11 @@ impl BFT { if self.dag.read().is_recently_committed(previous_round, *previous_certificate_id) { continue; } + // If the previous certificate already exists in the ledger, continue. + if ALLOW_LEDGER_ACCESS && self.ledger().contains_certificate(previous_certificate_id).unwrap_or(false) { + continue; + } + // Retrieve the previous certificate. let previous_certificate = { // Start by retrieving the previous certificate from the DAG. @@ -632,28 +724,11 @@ impl BFT { None => match self.storage().get_certificate(*previous_certificate_id) { // If the previous certificate is found, return it. Some(previous_certificate) => previous_certificate, - // Otherwise, retrieve the previous certificate from the ledger. - None => { - if ALLOW_LEDGER_ACCESS { - match self.ledger().get_batch_certificate(previous_certificate_id) { - // If the previous certificate is found, return it. - Ok(previous_certificate) => previous_certificate, - // Otherwise, the previous certificate is missing, and throw an error. - Err(e) => { - bail!( - "Missing previous certificate {} for round {previous_round} - {e}", - fmt_id(previous_certificate_id) - ) - } - } - } else { - // Otherwise, the previous certificate is missing, and throw an error. - bail!( - "Missing previous certificate {} for round {previous_round}", - fmt_id(previous_certificate_id) - ) - } - } + // Otherwise, the previous certificate is missing, and throw an error. + None => bail!( + "Missing previous certificate {} for round {previous_round}", + fmt_id(previous_certificate_id) + ), }, } }; @@ -668,28 +743,43 @@ impl BFT { // Return the certificates to commit. Ok(commit) } + + /// Returns `true` if there is a path from the previous certificate to the current certificate. + fn is_linked( + &self, + previous_certificate: BatchCertificate, + current_certificate: BatchCertificate, + ) -> Result { + // Initialize the list containing the traversal. + let mut traversal = vec![current_certificate.clone()]; + // Iterate over the rounds from the current certificate to the previous certificate. + for round in (previous_certificate.round()..current_certificate.round()).rev() { + // Retrieve all of the certificates for this past round. + let Some(certificates) = self.dag.read().get_certificates_for_round(round) else { + // This is a critical error, as the traversal should have these certificates. + // If this error is hit, it is likely that the maximum GC rounds should be increased. + bail!("BFT failed to retrieve the certificates for past round {round}"); + }; + // Filter the certificates to only include those that are in the traversal. + traversal = certificates + .into_values() + .filter(|p| traversal.iter().any(|c| c.previous_certificate_ids().contains(&p.id()))) + .collect(); + } + Ok(traversal.contains(&previous_certificate)) + } } impl BFT { /// Starts the BFT handlers. 
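The `is_linked` helper added above is a round-by-round ancestry test: starting from the current certificate, each step replaces the frontier with the parents referenced by the surviving certificates, and the check succeeds if the previous leader certificate is still in the frontier at its own round. A toy version over integer IDs; the `Cert` type and map layout here are illustrative, not the crate's types:

use std::collections::HashMap;

// A toy certificate: its round, plus the ids of its previous-round parents.
struct Cert {
    round: u64,
    parents: Vec<u64>,
}

// Returns `true` if `previous` is an ancestor of `current`, walking parent
// edges one round at a time, as the `is_linked` traversal above does.
fn is_linked(certs: &HashMap<u64, Cert>, previous: u64, current: u64) -> bool {
    let (previous_round, current_round) = (certs[&previous].round, certs[&current].round);
    let mut frontier = vec![current];
    for _round in (previous_round..current_round).rev() {
        // Replace the frontier with the union of its members' parents.
        frontier = frontier.iter().flat_map(|id| certs[id].parents.iter().copied()).collect();
    }
    frontier.contains(&previous)
}

fn main() {
    let mut certs = HashMap::new();
    certs.insert(1, Cert { round: 1, parents: vec![] });
    certs.insert(2, Cert { round: 1, parents: vec![] });
    certs.insert(3, Cert { round: 2, parents: vec![1] });
    assert!(is_linked(&certs, 1, 3)); // certificate 3 references certificate 1
    assert!(!is_linked(&certs, 2, 3)); // certificate 3 never references certificate 2
}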
fn start_handlers(&self, bft_receiver: BFTReceiver) { let BFTReceiver { - mut rx_last_election_certificate_ids, mut rx_primary_round, mut rx_primary_certificate, mut rx_sync_bft_dag_at_bootup, mut rx_sync_bft, } = bft_receiver; - // Process the request for the last election certificate IDs. - let self_ = self.clone(); - self.spawn(async move { - while let Some(callback) = rx_last_election_certificate_ids.recv().await { - // Retrieve the last election certificate IDs, and send them to the callback. - callback.send(self_.last_election_certificate_ids()).ok(); - } - }); - // Process the current round from the primary. let self_ = self.clone(); self.spawn(async move { @@ -703,7 +793,7 @@ impl BFT { self.spawn(async move { while let Some((certificate, callback)) = rx_primary_certificate.recv().await { // Update the DAG with the certificate. - let result = self_.update_dag::(certificate).await; + let result = self_.update_dag::(certificate).await; // Send the callback **after** updating the DAG. // Note: We must await the DAG update before proceeding. callback.send(result).ok(); @@ -713,8 +803,8 @@ impl BFT { // Process the request to sync the BFT DAG at bootup. let self_ = self.clone(); self.spawn(async move { - while let Some((leader_certificates, certificates)) = rx_sync_bft_dag_at_bootup.recv().await { - self_.sync_bft_dag_at_bootup(leader_certificates, certificates).await; + while let Some(certificates) = rx_sync_bft_dag_at_bootup.recv().await { + self_.sync_bft_dag_at_bootup(certificates).await; } }); @@ -723,7 +813,7 @@ impl BFT { self.spawn(async move { while let Some((certificate, callback)) = rx_sync_bft.recv().await { // Update the DAG with the certificate. - let result = self_.update_dag::(certificate).await; + let result = self_.update_dag::(certificate).await; // Send the callback **after** updating the DAG. // Note: We must await the DAG update before proceeding. callback.send(result).ok(); @@ -731,62 +821,19 @@ impl BFT { }); } - /// Syncs the BFT DAG with the given leader certificates and batch certificates. + /// Syncs the BFT DAG with the given batch certificates. These batch certificates **must** + /// already exist in the ledger. /// - /// This method starts by inserting all certificates (except the latest leader certificate) - /// into the DAG. Then, it commits all leader certificates (except the latest leader certificate). - /// Finally, it updates the DAG with the latest leader certificate. - async fn sync_bft_dag_at_bootup( - &self, - leader_certificates: Vec<(BatchCertificate, IndexSet>)>, - certificates: Vec>, - ) { - // Split the leader certificates into past leader certificates, the latest leader certificate, and the election certificate IDs. - let (past_leader_certificates, leader_certificate, election_certificate_ids) = { - // Compute the penultimate index. - let index = leader_certificates.len().saturating_sub(1); - // Split the leader certificates. - let (past, latest) = leader_certificates.split_at(index); - debug_assert!(latest.len() == 1, "There should only be one latest leader certificate"); - // Retrieve the latest leader certificate. - match latest.first() { - Some((leader_certificate, election_certificate_ids)) => { - (past, leader_certificate.clone(), election_certificate_ids.clone()) - } - // If there is no latest leader certificate, return early. - None => return, - } - }; - { - // Acquire the BFT write lock. - let mut dag = self.dag.write(); - // Iterate over the certificates. 
- for certificate in certificates { - // If the certificate is not the latest leader certificate, insert it. - if leader_certificate.id() != certificate.id() { - // Insert the certificate into the DAG. - dag.insert(certificate); - } - } - - // Acquire the last election certificate IDs. - let mut last_election_certificate_ids = self.last_election_certificate_ids.write(); - // Iterate over the leader certificates. - for (leader_certificate, election_certificate_ids) in past_leader_certificates { - // Commit the leader certificate. - dag.commit(leader_certificate, self.storage().max_gc_rounds()); - // Update the last election certificate IDs. - // - // Note: Because we will be committing the latest leader certificate after this, - // technically we do not need to be updating the last election certificate IDs - // for intermediate leader certificates. However, this is a safety mechanic to ensure completeness. - *last_election_certificate_ids = election_certificate_ids.clone(); - } - } - // Commit the latest leader certificate. - if let Err(e) = self.commit_leader_certificate::(leader_certificate, election_certificate_ids).await - { - error!("BFT failed to update the DAG with the latest leader certificate - {e}"); + /// This method commits all the certificates into the DAG. + /// Note that there is no need to insert the certificates into the DAG, because these certificates + /// already exist in the ledger and therefore do not need to be re-ordered into future committed subdags. + async fn sync_bft_dag_at_bootup(&self, certificates: Vec<BatchCertificate<N>>) { + // Acquire the BFT write lock. + let mut dag = self.dag.write(); + + // Commit all of the certificates into the DAG. + for certificate in certificates { + dag.commit(&certificate, self.storage().max_gc_rounds()); } } @@ -809,14 +856,12 @@ impl<N: Network> BFT<N> { #[cfg(test)] mod tests { - use crate::{ - helpers::{now, Storage}, - BFT, - }; + use crate::{helpers::Storage, BFT, MAX_LEADER_CERTIFICATE_DELAY_IN_SECS}; use snarkos_account::Account; use snarkos_node_bft_ledger_service::MockLedgerService; use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ + console::account::{Address, PrivateKey}, ledger::{ committee::Committee, narwhal::batch_certificate::test_helpers::{sample_batch_certificate, sample_batch_certificate_for_round}, @@ -825,10 +870,10 @@ mod tests { }; use anyhow::Result; - use indexmap::IndexSet; - use std::sync::{atomic::Ordering, Arc}; + use indexmap::{IndexMap, IndexSet}; + use std::sync::Arc; - type CurrentNetwork = snarkvm::console::network::Testnet3; + type CurrentNetwork = snarkvm::console::network::MainnetV0; /// Samples a new test instance, with an optional committee round and the given maximum GC rounds. fn sample_test_instance( @@ -858,34 +903,52 @@ mod tests { fn test_is_leader_quorum_odd() -> Result<()> { let rng = &mut TestRng::default(); - // Sample the test instance. - let (_, account, ledger, storage) = sample_test_instance(None, 10, rng); - assert_eq!(storage.max_gc_rounds(), 10); + // Sample batch certificates.
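The new bootup path above relies on a distinction between inserting and committing a DAG vertex: committed certificates are recorded (and pruned) so they can never be ordered into a later subdag, while inserted certificates remain orderable. A toy DAG illustrating that distinction, with hypothetical `insert`/`commit` semantics mirroring the ones described above:

use std::collections::{HashMap, HashSet};

// A toy DAG that tracks orderable vertices separately from committed ids.
#[derive(Default)]
struct ToyDag {
    vertices: HashMap<u64, HashSet<u64>>, // round -> certificate ids
    committed: HashSet<u64>,
    last_committed_round: u64,
}

impl ToyDag {
    // Inserting makes a certificate available for future subdag ordering.
    fn insert(&mut self, round: u64, id: u64) {
        self.vertices.entry(round).or_default().insert(id);
    }

    // Committing records the id and prunes the vertex, so it can never be
    // ordered into a later subdag again.
    fn commit(&mut self, round: u64, id: u64) {
        if let Some(set) = self.vertices.get_mut(&round) {
            set.remove(&id);
        }
        self.committed.insert(id);
        self.last_committed_round = self.last_committed_round.max(round);
    }
}

fn main() {
    let mut dag = ToyDag::default();
    // At bootup, ledger-final certificates are committed directly...
    dag.commit(2, 42);
    // ...so they are marked committed but absent from the orderable graph.
    assert!(dag.committed.contains(&42));
    assert!(!dag.vertices.get(&2).map_or(false, |set| set.contains(&42)));
    assert_eq!(dag.last_committed_round, 2);
    // Live certificates still go through `insert` so they can be ordered later.
    dag.insert(3, 43);
    assert!(dag.vertices[&3].contains(&43));
}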
+ let mut certificates = IndexSet::new(); + certificates.insert(snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_for_round_with_previous_certificate_ids(1, IndexSet::new(), rng)); + certificates.insert(snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_for_round_with_previous_certificate_ids(1, IndexSet::new(), rng)); + certificates.insert(snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_for_round_with_previous_certificate_ids(1, IndexSet::new(), rng)); + certificates.insert(snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_for_round_with_previous_certificate_ids(1, IndexSet::new(), rng)); - // Initialize the BFT. - let bft = BFT::new(account, storage, ledger, None, &[], None)?; - assert!(bft.is_timer_expired()); // 0 + 5 < now() + // Initialize the committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + 1, + vec![ + certificates[0].author(), + certificates[1].author(), + certificates[2].author(), + certificates[3].author(), + ], + rng, + ); + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), 10); + // Initialize the account. + let account = Account::new(rng)?; + // Initialize the BFT. + let bft = BFT::new(account.clone(), storage.clone(), ledger.clone(), None, &[], None)?; + assert!(bft.is_timer_expired()); + // Ensure this call succeeds on an odd round. + let result = bft.is_leader_quorum_or_nonleaders_available(1); + // If timer has expired but quorum threshold is not reached, return 'false'. + assert!(!result); + // Insert certificates into storage. + for certificate in certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } // Ensure this call succeeds on an odd round. let result = bft.is_leader_quorum_or_nonleaders_available(1); assert!(result); // no previous leader certificate - // Set the leader certificate. let leader_certificate = sample_batch_certificate(rng); *bft.leader_certificate.write() = Some(leader_certificate); - // Ensure this call succeeds on an odd round. let result = bft.is_leader_quorum_or_nonleaders_available(1); assert!(result); // should now fall through to the end of function - // Set the timer to now(). - bft.leader_certificate_timer.store(now(), Ordering::SeqCst); - assert!(!bft.is_timer_expired()); - - // Ensure this call succeeds on an odd round. - let result = bft.is_leader_quorum_or_nonleaders_available(1); - // Should now return false, as the timer is not expired. - assert!(!result); // should now fall through to end of function Ok(()) } @@ -937,25 +1000,62 @@ mod tests { fn test_is_even_round_ready() -> Result<()> { let rng = &mut TestRng::default(); - // Sample the test instance. - let (committee, account, ledger, storage) = sample_test_instance(Some(2), 10, rng); - assert_eq!(committee.starting_round(), 2); - assert_eq!(storage.current_round(), 2); - assert_eq!(storage.max_gc_rounds(), 10); - - // Initialize the BFT. - let bft = BFT::new(account, storage, ledger, None, &[], None)?; + // Sample batch certificates. 
+ let mut certificates = IndexSet::new(); + certificates.insert(sample_batch_certificate_for_round(2, rng)); + certificates.insert(sample_batch_certificate_for_round(2, rng)); + certificates.insert(sample_batch_certificate_for_round(2, rng)); + certificates.insert(sample_batch_certificate_for_round(2, rng)); - let result = bft.is_even_round_ready_for_next_round(IndexSet::new(), committee.clone(), 2); - assert!(!result); + // Initialize the committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + 2, + vec![ + certificates[0].author(), + certificates[1].author(), + certificates[2].author(), + certificates[3].author(), + ], + rng, + ); + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), 10); + // Initialize the account. + let account = Account::new(rng)?; + // Initialize the BFT. + let bft = BFT::new(account.clone(), storage.clone(), ledger.clone(), None, &[], None)?; // Set the leader certificate. let leader_certificate = sample_batch_certificate_for_round(2, rng); *bft.leader_certificate.write() = Some(leader_certificate); - - let result = bft.is_even_round_ready_for_next_round(IndexSet::new(), committee, 2); - // If leader certificate is set, we should be ready for next round. + let result = bft.is_even_round_ready_for_next_round(IndexSet::new(), committee.clone(), 2); + // If leader certificate is set but quorum threshold is not reached, we are not ready for the next round. + assert!(!result); + // Once quorum threshold is reached, we are ready for the next round. + let result = bft.is_even_round_ready_for_next_round(certificates.clone(), committee.clone(), 2); assert!(result); + + // Initialize a new BFT. + let bft_timer = BFT::new(account.clone(), storage.clone(), ledger.clone(), None, &[], None)?; + // If the leader certificate is not set and the timer has not expired, we are not ready for the next round. + let result = bft_timer.is_even_round_ready_for_next_round(certificates.clone(), committee.clone(), 2); + if !bft_timer.is_timer_expired() { + assert!(!result); + } + // Wait for the timer to expire. + let leader_certificate_timeout = + std::time::Duration::from_millis(MAX_LEADER_CERTIFICATE_DELAY_IN_SECS as u64 * 1000); + std::thread::sleep(leader_certificate_timeout); + // Once the leader certificate timer has expired and quorum threshold is reached, we are ready to advance to the next round. + let result = bft_timer.is_even_round_ready_for_next_round(certificates.clone(), committee.clone(), 2); + if bft_timer.is_timer_expired() { + assert!(result); + } else { + assert!(!result); + } + Ok(()) } @@ -1084,7 +1184,7 @@ mod tests { // Insert the previous certificates into the BFT. for certificate in previous_certificates.clone() { - assert!(bft.update_dag::(certificate).await.is_ok()); + assert!(bft.update_dag::(certificate).await.is_ok()); } // Ensure this call succeeds and returns all given certificates. @@ -1114,7 +1214,7 @@ mod tests { // Insert the previous certificates into the BFT. for certificate in previous_certificates.clone() { - assert!(bft.update_dag::(certificate).await.is_ok()); + assert!(bft.update_dag::(certificate).await.is_ok()); } // Ensure this call succeeds and returns all given certificates. 
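The test above exercises the leader certificate timer: advancing past an even round without a leader certificate is only allowed once the timer has expired and the quorum threshold is reached. A minimal sketch of such an expiry check, where the delay constant is a stand-in rather than the crate's real value:

use std::time::{SystemTime, UNIX_EPOCH};

// Stand-in value; the real constant lives in the snarkOS BFT crate.
const MAX_LEADER_CERTIFICATE_DELAY_IN_SECS: u64 = 2;

// Returns the current unix timestamp in seconds.
fn now() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).expect("clock before epoch").as_secs()
}

// The timer is expired once the full delay has elapsed since it was last reset.
fn is_timer_expired(timer_start: u64) -> bool {
    timer_start + MAX_LEADER_CERTIFICATE_DELAY_IN_SECS <= now()
}

fn main() {
    assert!(is_timer_expired(0)); // a timer started at the epoch is long expired
    assert!(!is_timer_expired(now())); // one reset just now is not
}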
@@ -1179,4 +1279,527 @@ mod tests { assert_eq!(result.unwrap_err().to_string(), error_msg); Ok(()) } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_bft_gc_on_commit() -> Result<()> { + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = 1; + let committee_round = 0; + let commit_round = 2; + let current_round = commit_round + 1; + + // Sample the certificates. + let (_, certificates) = snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_with_previous_certificates( + current_round, + rng, + ); + + // Initialize the committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + vec![ + certificates[0].author(), + certificates[1].author(), + certificates[2].author(), + certificates[3].author(), + ], + rng, + ); + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + + // Initialize the storage. + let transmissions = Arc::new(BFTMemoryService::new()); + let storage = Storage::new(ledger.clone(), transmissions, max_gc_rounds); + // Insert the certificates into the storage. + for certificate in certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + + // Get the leader certificate. + let leader = committee.get_leader(commit_round).unwrap(); + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader).unwrap(); + + // Initialize the BFT. + let account = Account::new(rng)?; + let bft = BFT::new(account, storage.clone(), ledger, None, &[], None)?; + // Insert a mock DAG in the BFT. + *bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(commit_round); + + // Ensure that the `gc_round` has not been updated yet. + assert_eq!(bft.storage().gc_round(), committee_round.saturating_sub(max_gc_rounds)); + + // Insert the certificates into the BFT. + for certificate in certificates { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + + // Commit the leader certificate. + bft.commit_leader_certificate::(leader_certificate).await.unwrap(); + + // Ensure that the `gc_round` has been updated. + assert_eq!(bft.storage().gc_round(), commit_round - max_gc_rounds); + + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_sync_bft_dag_at_bootup() -> Result<()> { + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = 1; + let committee_round = 0; + let commit_round = 2; + let current_round = commit_round + 1; + + // Sample the current certificate and previous certificates. + let (_, certificates) = snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate_with_previous_certificates( + current_round, + rng, + ); + + // Initialize the committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + vec![ + certificates[0].author(), + certificates[1].author(), + certificates[2].author(), + certificates[3].author(), + ], + rng, + ); + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Insert the certificates into the storage. 
+ for certificate in certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + + // Get the leader certificate. + let leader = committee.get_leader(commit_round).unwrap(); + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader).unwrap(); + + // Initialize the BFT. + let account = Account::new(rng)?; + let bft = BFT::new(account.clone(), storage, ledger.clone(), None, &[], None)?; + + // Insert a mock DAG in the BFT. + *bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(commit_round); + + // Insert the previous certificates into the BFT. + for certificate in certificates.clone() { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + + // Commit the leader certificate. + bft.commit_leader_certificate::(leader_certificate.clone()).await.unwrap(); + + // Simulate a bootup of the BFT. + + // Initialize a new instance of storage. + let storage_2 = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Initialize a new instance of BFT. + let bootup_bft = BFT::new(account, storage_2, ledger, None, &[], None)?; + + // Sync the BFT DAG at bootup. + bootup_bft.sync_bft_dag_at_bootup(certificates.clone()).await; + + // Check that the BFT starts from the same last committed round. + assert_eq!(bft.dag.read().last_committed_round(), bootup_bft.dag.read().last_committed_round()); + + // Ensure that both BFTs have committed the leader certificate. + assert!(bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + assert!(bootup_bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + + // Check the state of the bootup BFT. + for certificate in certificates { + let certificate_round = certificate.round(); + let certificate_id = certificate.id(); + // Check that the bootup BFT has committed the certificates. + assert!(bootup_bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + // Check that the bootup BFT does not contain the certificates in its graph, because + // it should not need to order them again in subsequent subdags. + assert!(!bootup_bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id)); + } + + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_sync_bft_dag_at_bootup_shutdown() -> Result<()> { + /* + 1. Run one uninterrupted BFT on a set of certificates for 2 leader commits. + 2. Run a separate bootup BFT that syncs with a set of pre shutdown certificates, and then commits a second leader normally over a set of post shutdown certificates. + 3. Observe that the uninterrupted BFT and the bootup BFT end in the same state. + */ + + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = snarkvm::ledger::narwhal::BatchHeader::::MAX_GC_ROUNDS as u64; + let committee_round = 0; + let commit_round = 2; + let current_round = commit_round + 1; + let next_round = current_round + 1; + + // Sample 5 rounds of batch certificates starting at the genesis round from a static set of 4 authors. 
+ let (round_to_certificates_map, committee) = { + let private_keys = vec![ + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + ]; + let addresses = vec![ + Address::try_from(private_keys[0])?, + Address::try_from(private_keys[1])?, + Address::try_from(private_keys[2])?, + Address::try_from(private_keys[3])?, + ]; + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + addresses, + rng, + ); + // Initialize a mapping from the round number to the set of batch certificates in the round. + let mut round_to_certificates_map: IndexMap< + u64, + IndexSet>, + > = IndexMap::new(); + let mut previous_certificates = IndexSet::with_capacity(4); + // Initialize the genesis batch certificates. + for _ in 0..4 { + previous_certificates.insert(sample_batch_certificate(rng)); + } + for round in 0..commit_round + 3 { + let mut current_certificates = IndexSet::new(); + let previous_certificate_ids: IndexSet<_> = if round == 0 || round == 1 { + IndexSet::new() + } else { + previous_certificates.iter().map(|c| c.id()).collect() + }; + let transmission_ids = + snarkvm::ledger::narwhal::transmission_id::test_helpers::sample_transmission_ids(rng) + .into_iter() + .collect::>(); + let timestamp = time::OffsetDateTime::now_utc().unix_timestamp(); + let committee_id = committee.id(); + for (i, private_key_1) in private_keys.iter().enumerate() { + let batch_header = snarkvm::ledger::narwhal::BatchHeader::new( + private_key_1, + round, + timestamp, + committee_id, + transmission_ids.clone(), + previous_certificate_ids.clone(), + rng, + ) + .unwrap(); + let mut signatures = IndexSet::with_capacity(4); + for (j, private_key_2) in private_keys.iter().enumerate() { + if i != j { + signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap()); + } + } + let certificate = + snarkvm::ledger::narwhal::BatchCertificate::from(batch_header, signatures).unwrap(); + current_certificates.insert(certificate); + } + // Update the mapping. + round_to_certificates_map.insert(round, current_certificates.clone()); + previous_certificates = current_certificates.clone(); + } + (round_to_certificates_map, committee) + }; + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Get the leaders for the next 2 commit rounds. + let leader = committee.get_leader(commit_round).unwrap(); + let next_leader = committee.get_leader(next_round).unwrap(); + // Insert the pre shutdown certificates into the storage. + let mut pre_shutdown_certificates: Vec> = Vec::new(); + for i in 1..=commit_round { + let certificates = (*round_to_certificates_map.get(&i).unwrap()).clone(); + if i == commit_round { + // Only insert the leader certificate for the commit round. + let leader_certificate = certificates.iter().find(|certificate| certificate.author() == leader); + if let Some(c) = leader_certificate { + pre_shutdown_certificates.push(c.clone()); + } + continue; + } + pre_shutdown_certificates.extend(certificates); + } + for certificate in pre_shutdown_certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Insert the post shutdown certificates into the storage. 
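The fixture built above links each round's certificates to every certificate of the previous round, with rounds 0 and 1 carrying no parents. A compact sketch of just that linking structure, using integer IDs in place of signed batch certificates:

use std::collections::BTreeMap;

/// Builds `rounds` rounds of `width` toy certificates; from round 2 onward,
/// each certificate lists every id from the previous round as a parent.
fn build_fixture(rounds: u64, width: u64) -> BTreeMap<u64, Vec<(u64, Vec<u64>)>> {
    let mut map = BTreeMap::new();
    let mut previous_ids: Vec<u64> = Vec::new();
    let mut next_id = 0u64;
    for round in 0..rounds {
        let parents = if round <= 1 { Vec::new() } else { previous_ids.clone() };
        let mut current = Vec::new();
        for _ in 0..width {
            current.push((next_id, parents.clone()));
            next_id += 1;
        }
        previous_ids = current.iter().map(|(id, _)| *id).collect();
        map.insert(round, current);
    }
    map
}

fn main() {
    let fixture = build_fixture(5, 4);
    // Rounds 0 and 1 have no parents; round 2 links to all four ids of round 1.
    assert!(fixture[&1].iter().all(|(_, parents)| parents.is_empty()));
    let round_1_ids: Vec<u64> = fixture[&1].iter().map(|(id, _)| *id).collect();
    assert_eq!(fixture[&2][0].1, round_1_ids);
}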
+ let mut post_shutdown_certificates: Vec> = + Vec::new(); + for j in commit_round..=commit_round + 2 { + let certificate = (*round_to_certificates_map.get(&j).unwrap()).clone(); + post_shutdown_certificates.extend(certificate); + } + for certificate in post_shutdown_certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Get the leader certificates. + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader).unwrap(); + let next_leader_certificate = storage.get_certificate_for_round_with_author(next_round, next_leader).unwrap(); + + // Initialize the BFT without bootup. + let account = Account::new(rng)?; + let bft = BFT::new(account.clone(), storage, ledger.clone(), None, &[], None)?; + + // Insert a mock DAG in the BFT without bootup. + *bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(0); + + // Insert the certificates into the BFT without bootup. + for certificate in pre_shutdown_certificates.clone() { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + + // Insert the post shutdown certificates into the BFT without bootup. + for certificate in post_shutdown_certificates.clone() { + assert!(bft.update_dag::(certificate).await.is_ok()); + } + // Commit the second leader certificate. + let commit_subdag = bft.order_dag_with_dfs::(next_leader_certificate.clone()).unwrap(); + let commit_subdag_metadata = commit_subdag.iter().map(|(round, c)| (*round, c.len())).collect::>(); + bft.commit_leader_certificate::(next_leader_certificate.clone()).await.unwrap(); + + // Simulate a bootup of the BFT. + + // Initialize a new instance of storage. + let bootup_storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + + // Initialize a new instance of BFT with bootup. + let bootup_bft = BFT::new(account, bootup_storage.clone(), ledger.clone(), None, &[], None)?; + + // Sync the BFT DAG at bootup. + bootup_bft.sync_bft_dag_at_bootup(pre_shutdown_certificates.clone()).await; + + // Insert the post shutdown certificates to the storage and BFT with bootup. + for certificate in post_shutdown_certificates.iter() { + bootup_bft.storage().testing_only_insert_certificate_testing_only(certificate.clone()); + } + for certificate in post_shutdown_certificates.clone() { + assert!(bootup_bft.update_dag::(certificate).await.is_ok()); + } + // Commit the second leader certificate. + let commit_subdag_bootup = bootup_bft.order_dag_with_dfs::(next_leader_certificate.clone()).unwrap(); + let commit_subdag_metadata_bootup = + commit_subdag_bootup.iter().map(|(round, c)| (*round, c.len())).collect::>(); + let committed_certificates_bootup = commit_subdag_bootup.values().flatten(); + bootup_bft.commit_leader_certificate::(next_leader_certificate.clone()).await.unwrap(); + + // Check that the final state of both BFTs is the same. + + // Check that both BFTs start from the same last committed round. + assert_eq!(bft.dag.read().last_committed_round(), bootup_bft.dag.read().last_committed_round()); + + // Ensure that both BFTs have committed the leader certificates. 
+ assert!(bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + assert!(bft.dag.read().is_recently_committed(next_leader_certificate.round(), next_leader_certificate.id())); + assert!(bootup_bft.dag.read().is_recently_committed(leader_certificate.round(), leader_certificate.id())); + assert!( + bootup_bft.dag.read().is_recently_committed(next_leader_certificate.round(), next_leader_certificate.id()) + ); + + // Check that the bootup BFT has committed the pre shutdown certificates. + for certificate in pre_shutdown_certificates.clone() { + let certificate_round = certificate.round(); + let certificate_id = certificate.id(); + // Check that both BFTs have committed the certificates. + assert!(bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + assert!(bootup_bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + // Check that the bootup BFT does not contain the certificates in its graph, because + // it should not need to order them again in subsequent subdags. + assert!(!bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id)); + assert!(!bootup_bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id)); + } + + // Check that the bootup BFT has committed the subdag stemming from the second leader certificate in consensus. + for certificate in committed_certificates_bootup.clone() { + let certificate_round = certificate.round(); + let certificate_id = certificate.id(); + // Check that both BFTs have committed the certificates. + assert!(bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + assert!(bootup_bft.dag.read().is_recently_committed(certificate_round, certificate_id)); + // Check that the bootup BFT does not contain the certificates in its graph, because + // it should not need to order them again in subsequent subdags. + assert!(!bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id)); + assert!(!bootup_bft.dag.read().contains_certificate_in_round(certificate_round, certificate_id)); + } + + // Check that the commit subdag metadata for the second leader is the same for both BFTs. + assert_eq!(commit_subdag_metadata_bootup, commit_subdag_metadata); + + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_sync_bft_dag_at_bootup_dfs() -> Result<()> { + /* + 1. Run a bootup BFT that syncs with a set of pre shutdown certificates. + 2. Add post shutdown certificates to the bootup BFT. + 3. Observe that in the commit subdag of the second leader certificate, there are no repeated vertices from the pre shutdown certificates. + */ + + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let max_gc_rounds = snarkvm::ledger::narwhal::BatchHeader::<CurrentNetwork>::MAX_GC_ROUNDS as u64; + let committee_round = 0; + let commit_round = 2; + let current_round = commit_round + 1; + let next_round = current_round + 1; + + // Sample 5 rounds of batch certificates starting at the genesis round from a static set of 4 authors.
+ let (round_to_certificates_map, committee) = { + let private_keys = vec![ + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + PrivateKey::new(rng).unwrap(), + ]; + let addresses = vec![ + Address::try_from(private_keys[0])?, + Address::try_from(private_keys[1])?, + Address::try_from(private_keys[2])?, + Address::try_from(private_keys[3])?, + ]; + let committee = snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_members( + committee_round, + addresses, + rng, + ); + // Initialize a mapping from the round number to the set of batch certificates in the round. + let mut round_to_certificates_map: IndexMap< + u64, + IndexSet>, + > = IndexMap::new(); + let mut previous_certificates = IndexSet::with_capacity(4); + // Initialize the genesis batch certificates. + for _ in 0..4 { + previous_certificates.insert(sample_batch_certificate(rng)); + } + for round in 0..=commit_round + 2 { + let mut current_certificates = IndexSet::new(); + let previous_certificate_ids: IndexSet<_> = if round == 0 || round == 1 { + IndexSet::new() + } else { + previous_certificates.iter().map(|c| c.id()).collect() + }; + let transmission_ids = + snarkvm::ledger::narwhal::transmission_id::test_helpers::sample_transmission_ids(rng) + .into_iter() + .collect::>(); + let timestamp = time::OffsetDateTime::now_utc().unix_timestamp(); + let committee_id = committee.id(); + for (i, private_key_1) in private_keys.iter().enumerate() { + let batch_header = snarkvm::ledger::narwhal::BatchHeader::new( + private_key_1, + round, + timestamp, + committee_id, + transmission_ids.clone(), + previous_certificate_ids.clone(), + rng, + ) + .unwrap(); + let mut signatures = IndexSet::with_capacity(4); + for (j, private_key_2) in private_keys.iter().enumerate() { + if i != j { + signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap()); + } + } + let certificate = + snarkvm::ledger::narwhal::BatchCertificate::from(batch_header, signatures).unwrap(); + current_certificates.insert(certificate); + } + // Update the mapping. + round_to_certificates_map.insert(round, current_certificates.clone()); + previous_certificates = current_certificates.clone(); + } + (round_to_certificates_map, committee) + }; + + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Get the leaders for the next 2 commit rounds. + let leader = committee.get_leader(commit_round).unwrap(); + let next_leader = committee.get_leader(next_round).unwrap(); + // Insert the pre shutdown certificates into the storage. + let mut pre_shutdown_certificates: Vec> = Vec::new(); + for i in 1..=commit_round { + let certificates = (*round_to_certificates_map.get(&i).unwrap()).clone(); + if i == commit_round { + // Only insert the leader certificate for the commit round. + let leader_certificate = certificates.iter().find(|certificate| certificate.author() == leader); + if let Some(c) = leader_certificate { + pre_shutdown_certificates.push(c.clone()); + } + continue; + } + pre_shutdown_certificates.extend(certificates); + } + for certificate in pre_shutdown_certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Initialize the bootup BFT. 
+ let account = Account::new(rng)?; + let bootup_bft = BFT::new(account.clone(), storage.clone(), ledger.clone(), None, &[], None)?; + // Insert a mock DAG in the bootup BFT. + *bootup_bft.dag.write() = crate::helpers::dag::test_helpers::mock_dag_with_modified_last_committed_round(0); + // Sync the BFT DAG at bootup. + bootup_bft.sync_bft_dag_at_bootup(pre_shutdown_certificates.clone()).await; + + // Insert the post shutdown certificates into the storage. + let mut post_shutdown_certificates: Vec<BatchCertificate<CurrentNetwork>> = + Vec::new(); + for j in commit_round..=commit_round + 2 { + let certificate = (*round_to_certificates_map.get(&j).unwrap()).clone(); + post_shutdown_certificates.extend(certificate); + } + for certificate in post_shutdown_certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + + // Insert the post shutdown certificates into the DAG. + for certificate in post_shutdown_certificates.clone() { + assert!(bootup_bft.update_dag::(certificate).await.is_ok()); + } + + // Get the next leader certificate to commit. + let next_leader_certificate = storage.get_certificate_for_round_with_author(next_round, next_leader).unwrap(); + let commit_subdag = bootup_bft.order_dag_with_dfs::(next_leader_certificate).unwrap(); + let committed_certificates = commit_subdag.values().flatten(); + + // Check that none of the certificates synced from the bootup appear in the subdag for the next commit round. + for pre_shutdown_certificate in pre_shutdown_certificates.clone() { + for committed_certificate in committed_certificates.clone() { + assert_ne!(pre_shutdown_certificate.id(), committed_certificate.id()); + } + } + Ok(()) + } } diff --git a/node/bft/src/gateway.rs b/node/bft/src/gateway.rs index 44f076514c..0e404ff635 100644 --- a/node/bft/src/gateway.rs +++ b/node/bft/src/gateway.rs @@ -14,13 +14,11 @@ use crate::{ events::{EventCodec, PrimaryPing}, - helpers::{assign_to_worker, Cache, PrimarySender, Resolver, SyncSender, WorkerSender}, + helpers::{assign_to_worker, Cache, PrimarySender, Resolver, Storage, SyncSender, WorkerSender}, spawn_blocking, + Worker, CONTEXT, MAX_BATCH_DELAY_IN_MS, - MAX_GC_ROUNDS, - MAX_TRANSMISSIONS_PER_BATCH, - MAX_TRANSMISSIONS_PER_WORKER_PING, MEMORY_POOL_PORT, }; use snarkos_account::Account; @@ -41,7 +39,7 @@ use snarkos_node_bft_events::{ ValidatorsResponse, }; use snarkos_node_bft_ledger_service::LedgerService; -use snarkos_node_sync::communication_service::CommunicationService; +use snarkos_node_sync::{communication_service::CommunicationService, MAX_BLOCKS_BEHIND}; use snarkos_node_tcp::{ is_bogon_ip, is_unspecified_or_broadcast_ip, @@ -54,7 +52,10 @@ use snarkos_node_tcp::{ }; use snarkvm::{ console::prelude::*, - ledger::{committee::Committee, narwhal::Data}, + ledger::{ + committee::Committee, + narwhal::{BatchHeader, Data}, + }, prelude::Address, }; @@ -99,6 +100,8 @@ pub trait Transport<N: Network>: Send + Sync { pub struct Gateway<N: Network> { /// The account of the node. account: Account<N>, + /// The storage. + storage: Storage<N>, /// The ledger service. ledger: Arc<dyn LedgerService<N>>, /// The TCP stack. @@ -132,6 +135,7 @@ impl<N: Network> Gateway<N> { /// Initializes a new gateway. pub fn new( account: Account<N>, + storage: Storage<N>, ledger: Arc<dyn LedgerService<N>>, ip: Option<SocketAddr>, trusted_validators: &[SocketAddr], @@ -148,6 +152,7 @@ // Return the gateway. Ok(Self { account, + storage, ledger, tcp, cache: Default::default(), @@ -215,12 +220,12 @@ /// The maximum number of certificate requests to cache.
fn max_cache_certificates(&self) -> usize { - 2 * MAX_GC_ROUNDS as usize * self.max_committee_size() + 2 * BatchHeader::::MAX_GC_ROUNDS * self.max_committee_size() } /// The maximum number of transmission requests to cache. fn max_cache_transmissions(&self) -> usize { - self.max_cache_certificates() * MAX_TRANSMISSIONS_PER_BATCH + self.max_cache_certificates() * BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH } /// The maximum number of duplicates for any particular request. @@ -256,6 +261,11 @@ impl Gateway { &self.account } + /// Returns the dev identifier of the node. + pub const fn dev(&self) -> Option { + self.dev + } + /// Returns the IP address of this node. pub fn local_ip(&self) -> SocketAddr { self.tcp.listening_addr().expect("The TCP listener is not enabled") @@ -329,18 +339,38 @@ impl Gateway { /// Returns `true` if the given address is an authorized validator. pub fn is_authorized_validator_address(&self, validator_address: Address) -> bool { - // Determine if the validator address is a member of the previous or current committee. + // Determine if the validator address is a member of the committee lookback, + // the current committee, or the previous committee lookbacks. // We allow leniency in this validation check in order to accommodate these two scenarios: // 1. New validators should be able to connect immediately once bonded as a committee member. // 2. Existing validators must remain connected until they are no longer bonded as a committee member. // (i.e. meaning they must stay online until the next block has been produced) - self.ledger - .get_previous_committee_for_round(self.ledger.latest_round()) + + // Determine if the validator is in the current committee with lookback. + if self + .ledger + .get_committee_lookback_for_round(self.storage.current_round()) .map_or(false, |committee| committee.is_committee_member(validator_address)) - || self - .ledger - .current_committee() - .map_or(false, |committee| committee.is_committee_member(validator_address)) + { + return true; + } + + // Determine if the validator is in the latest committee on the ledger. + if self.ledger.current_committee().map_or(false, |committee| committee.is_committee_member(validator_address)) { + return true; + } + + // Retrieve the previous block height to consider from the sync tolerance. + let previous_block_height = self.ledger.latest_block_height().saturating_sub(MAX_BLOCKS_BEHIND); + // Determine if the validator is in any of the previous committee lookbacks. + match self.ledger.get_block_round(previous_block_height) { + Ok(block_round) => (block_round..self.storage.current_round()).step_by(2).any(|round| { + self.ledger + .get_committee_lookback_for_round(round) + .map_or(false, |committee| committee.is_committee_member(validator_address)) + }), + Err(_) => false, + } } /// Returns the maximum number of connected peers. @@ -606,7 +636,9 @@ impl Gateway { // Ensure the block response is well-formed. blocks.ensure_response_is_well_formed(peer_ip, request.start_height, request.end_height)?; // Send the blocks to the sync module. 
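The cache and queue bounds above all derive from the same worst case: up to two GC windows of certificates per committee member, each referencing up to a full batch of transmissions. A sketch of that arithmetic, with stand-in constants rather than the real `BatchHeader` and `Committee` bounds:

// Illustrative stand-ins; the real bounds come from `BatchHeader` and `Committee`.
const MAX_GC_ROUNDS: usize = 100;
const MAX_COMMITTEE_SIZE: usize = 200;
const MAX_TRANSMISSIONS_PER_BATCH: usize = 250;

// Twice the number of certificates that can be outstanding before GC reclaims them.
fn max_cache_certificates(committee_size: usize) -> usize {
    2 * MAX_GC_ROUNDS * committee_size
}

// Every cached certificate may reference up to a full batch of transmissions.
fn max_cache_transmissions(committee_size: usize) -> usize {
    max_cache_certificates(committee_size) * MAX_TRANSMISSIONS_PER_BATCH
}

fn main() {
    assert_eq!(max_cache_certificates(MAX_COMMITTEE_SIZE), 40_000);
    assert_eq!(max_cache_transmissions(MAX_COMMITTEE_SIZE), 10_000_000);
}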
- return sync_sender.advance_with_sync_blocks(peer_ip, blocks.0).await; + if let Err(e) = sync_sender.advance_with_sync_blocks(peer_ip, blocks.0).await { + warn!("Unable to process block response from '{peer_ip}' - {e}"); + } } Ok(()) } @@ -634,7 +666,7 @@ impl Gateway { bail!("{CONTEXT} {:?}", disconnect.reason) } Event::PrimaryPing(ping) => { - let PrimaryPing { version, block_locators, primary_certificate, batch_certificates } = ping; + let PrimaryPing { version, block_locators, primary_certificate } = ping; // Ensure the event version is not outdated. if version < Event::::VERSION { @@ -650,11 +682,7 @@ impl Gateway { } // Send the batch certificates to the primary. - let _ = self - .primary_sender() - .tx_primary_ping - .send((peer_ip, primary_certificate, batch_certificates)) - .await; + let _ = self.primary_sender().tx_primary_ping.send((peer_ip, primary_certificate)).await; Ok(()) } Event::TransmissionRequest(request) => { @@ -770,7 +798,7 @@ impl Gateway { Event::WorkerPing(ping) => { // Ensure the number of transmissions is not too large. ensure!( - ping.transmission_ids.len() <= MAX_TRANSMISSIONS_PER_WORKER_PING, + ping.transmission_ids.len() <= Worker::::MAX_TRANSMISSIONS_PER_WORKER_PING, "{CONTEXT} Received too many transmissions" ); // Retrieve the number of workers. @@ -1004,8 +1032,10 @@ impl Reading for Gateway { type Message = Event; /// The maximum queue depth of incoming messages for a single peer. - const MESSAGE_QUEUE_DEPTH: usize = - 2 * MAX_GC_ROUNDS as usize * Committee::::MAX_COMMITTEE_SIZE as usize * MAX_TRANSMISSIONS_PER_BATCH; + const MESSAGE_QUEUE_DEPTH: usize = 2 + * BatchHeader::::MAX_GC_ROUNDS + * Committee::::MAX_COMMITTEE_SIZE as usize + * BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH; /// Creates a [`Decoder`] used to interpret messages from the network. /// The `side` param indicates the connection side **from the node's perspective**. @@ -1037,8 +1067,10 @@ impl Writing for Gateway { type Message = Event; /// The maximum queue depth of outgoing messages for a single peer. - const MESSAGE_QUEUE_DEPTH: usize = - 2 * MAX_GC_ROUNDS as usize * Committee::::MAX_COMMITTEE_SIZE as usize * MAX_TRANSMISSIONS_PER_BATCH; + const MESSAGE_QUEUE_DEPTH: usize = 2 + * BatchHeader::::MAX_GC_ROUNDS + * Committee::::MAX_COMMITTEE_SIZE as usize + * BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH; /// Creates an [`Encoder`] used to write the outbound messages to the target stream. /// The `side` parameter indicates the connection side **from the node's perspective**. @@ -1193,11 +1225,13 @@ impl Gateway { /* Step 3: Send the challenge response. */ // Sign the counterparty nonce. - let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else { + let response_nonce: u64 = rng.gen(); + let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat(); + let Ok(our_signature) = self.account.sign_bytes(&data, rng) else { return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'"))); }; // Send the challenge response. - let our_response = ChallengeResponse { signature: Data::Object(our_signature) }; + let our_response = ChallengeResponse { signature: Data::Object(our_signature), nonce: response_nonce }; send_event(&mut framed, peer_addr, Event::ChallengeResponse(our_response)).await?; // Add the peer to the gateway. @@ -1246,11 +1280,13 @@ impl Gateway { let rng = &mut rand::rngs::OsRng; // Sign the counterparty nonce. 
- let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else { + let response_nonce: u64 = rng.gen(); + let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat(); + let Ok(our_signature) = self.account.sign_bytes(&data, rng) else { return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'"))); }; // Send the challenge response. - let our_response = ChallengeResponse { signature: Data::Object(our_signature) }; + let our_response = ChallengeResponse { signature: Data::Object(our_signature), nonce: response_nonce }; send_event(&mut framed, peer_addr, Event::ChallengeResponse(our_response)).await?; // Sample a random nonce. @@ -1307,14 +1343,14 @@ impl Gateway { expected_nonce: u64, ) -> Option { // Retrieve the components of the challenge response. - let ChallengeResponse { signature } = response; + let ChallengeResponse { signature, nonce } = response; // Perform the deferred non-blocking deserialization of the signature. let Ok(signature) = spawn_blocking!(signature.deserialize_blocking()) else { warn!("{CONTEXT} Gateway handshake with '{peer_addr}' failed (cannot deserialize the signature)"); return Some(DisconnectReason::InvalidChallengeResponse); }; // Verify the signature. - if !signature.verify_bytes(&peer_address, &expected_nonce.to_le_bytes()) { + if !signature.verify_bytes(&peer_address, &[expected_nonce.to_le_bytes(), nonce.to_le_bytes()].concat()) { warn!("{CONTEXT} Gateway handshake with '{peer_addr}' failed (invalid signature)"); return Some(DisconnectReason::InvalidChallengeResponse); } @@ -1334,16 +1370,22 @@ mod prop_tests { }; use snarkos_account::Account; use snarkos_node_bft_ledger_service::MockLedgerService; + use snarkos_node_bft_storage_service::BFTMemoryService; use snarkos_node_tcp::P2P; use snarkvm::{ - ledger::committee::{ - prop_tests::{CommitteeContext, ValidatorSet}, - Committee, + ledger::{ + committee::{ + prop_tests::{CommitteeContext, ValidatorSet}, + test_helpers::sample_committee_for_round_and_members, + Committee, + }, + narwhal::{batch_certificate::test_helpers::sample_batch_certificate_for_round, BatchHeader}, }, - prelude::{PrivateKey, Testnet3}, + prelude::{MainnetV0, PrivateKey}, + utilities::TestRng, }; - use indexmap::IndexMap; + use indexmap::{IndexMap, IndexSet}; use proptest::{ prelude::{any, any_with, Arbitrary, BoxedStrategy, Just, Strategy}, sample::Selector, @@ -1355,7 +1397,7 @@ mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; impl Debug for Gateway { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { @@ -1395,6 +1437,7 @@ mod prop_tests { .prop_map(|(storage, _, private_key, address)| { Gateway::new( Account::try_from(private_key).unwrap(), + storage.clone(), storage.ledger().clone(), address.ip(), &[], @@ -1443,7 +1486,9 @@ mod prop_tests { let (storage, _, private_key, dev) = input; let account = Account::try_from(private_key).unwrap(); - let gateway = Gateway::new(account.clone(), storage.ledger().clone(), dev.ip(), &[], dev.port()).unwrap(); + let gateway = + Gateway::new(account.clone(), storage.clone(), storage.ledger().clone(), dev.ip(), &[], dev.port()) + .unwrap(); let tcp_config = gateway.tcp().config(); assert_eq!(tcp_config.listener_ip, Some(IpAddr::V4(Ipv4Addr::LOCALHOST))); assert_eq!(tcp_config.desired_listening_port, Some(MEMORY_POOL_PORT + dev.port().unwrap())); @@ -1458,7 +1503,9 @@ mod prop_tests { let (storage, _, private_key, dev) = input; let account = 
Account::try_from(private_key).unwrap(); - let gateway = Gateway::new(account.clone(), storage.ledger().clone(), dev.ip(), &[], dev.port()).unwrap(); + let gateway = + Gateway::new(account.clone(), storage.clone(), storage.ledger().clone(), dev.ip(), &[], dev.port()) + .unwrap(); let tcp_config = gateway.tcp().config(); if let Some(socket_addr) = dev.ip() { assert_eq!(tcp_config.listener_ip, Some(socket_addr.ip())); @@ -1483,7 +1530,8 @@ mod prop_tests { let worker_storage = storage.clone(); let account = Account::try_from(private_key).unwrap(); - let gateway = Gateway::new(account, storage.ledger().clone(), dev.ip(), &[], dev.port()).unwrap(); + let gateway = + Gateway::new(account, storage.clone(), storage.ledger().clone(), dev.ip(), &[], dev.port()).unwrap(); let (primary_sender, _) = init_primary_channels(); @@ -1518,4 +1566,49 @@ mod prop_tests { ); assert_eq!(gateway.num_workers(), workers.len() as u8); } + + #[proptest] + fn test_is_authorized_validator(#[strategy(any_valid_dev_gateway())] input: GatewayInput) { + let rng = &mut TestRng::default(); + + // Initialize the round parameters. + let current_round = 2; + let committee_size = 4; + let max_gc_rounds = BatchHeader::::MAX_GC_ROUNDS as u64; + let (_, _, private_key, dev) = input; + let account = Account::try_from(private_key).unwrap(); + + // Sample the certificates. + let mut certificates = IndexSet::new(); + for _ in 0..committee_size { + certificates.insert(sample_batch_certificate_for_round(current_round, rng)); + } + let addresses: Vec<_> = certificates.iter().map(|certificate| certificate.author()).collect(); + // Initialize the committee. + let committee = sample_committee_for_round_and_members(current_round, addresses, rng); + // Sample extra certificates from non-committee members. + for _ in 0..committee_size { + certificates.insert(sample_batch_certificate_for_round(current_round, rng)); + } + // Initialize the ledger. + let ledger = Arc::new(MockLedgerService::new(committee.clone())); + // Initialize the storage. + let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Initialize the gateway. + let gateway = + Gateway::new(account.clone(), storage.clone(), ledger.clone(), dev.ip(), &[], dev.port()).unwrap(); + // Insert certificate to the storage. + for certificate in certificates.iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Check that the current committee members are authorized validators. 
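The handshake hunks above change the challenge response to sign the peer's request nonce concatenated with a fresh response nonce, so a captured response cannot be replayed against a later challenge. A toy sketch of the byte layout and the verifier's check, with a keyed hash standing in for the real account signature:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-in for `sign_bytes`: a keyed hash over the message bytes.
fn toy_sign(secret: u64, message: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    secret.hash(&mut hasher);
    message.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let secret = 42u64;
    let request_nonce = 7u64; // nonce received in the challenge request
    let response_nonce = 99u64; // fresh nonce chosen by the responder

    // Sign both nonces, concatenating their little-endian bytes as the diff does.
    let data = [request_nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
    let signature = toy_sign(secret, &data);

    // The verifier rebuilds the bytes from its expected nonce and the response nonce.
    let expected = [request_nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
    assert_eq!(signature, toy_sign(secret, &expected));

    // A replayed signature fails against a different challenge nonce.
    let other = [8u64.to_le_bytes(), response_nonce.to_le_bytes()].concat();
    assert_ne!(signature, toy_sign(secret, &other));
}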
+ for i in 0..certificates.clone().len() { + let is_authorized = gateway.is_authorized_validator_address(certificates[i].author()); + if i < committee_size { + assert!(is_authorized); + } else { + assert!(!is_authorized); + } + } + } } diff --git a/node/bft/src/helpers/cache.rs b/node/bft/src/helpers/cache.rs index d3b7b4504f..7e6bafc0a9 100644 --- a/node/bft/src/helpers/cache.rs +++ b/node/bft/src/helpers/cache.rs @@ -191,11 +191,11 @@ impl Cache { #[cfg(test)] mod tests { use super::*; - use snarkvm::prelude::Testnet3; + use snarkvm::prelude::MainnetV0; use std::{net::Ipv4Addr, thread, time::Duration}; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; trait Input { fn input() -> Self; diff --git a/node/bft/src/helpers/channels.rs b/node/bft/src/helpers/channels.rs index 8881b1f0ca..70c6ad617b 100644 --- a/node/bft/src/helpers/channels.rs +++ b/node/bft/src/helpers/channels.rs @@ -22,16 +22,16 @@ use crate::events::{ }; use snarkos_node_sync::locators::BlockLocators; use snarkvm::{ - console::{network::*, types::Field}, + console::network::*, ledger::{ block::{Block, Transaction}, - coinbase::{ProverSolution, PuzzleCommitment}, narwhal::{BatchCertificate, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, prelude::Result, }; -use indexmap::{IndexMap, IndexSet}; +use indexmap::IndexMap; use std::net::SocketAddr; use tokio::sync::{mpsc, oneshot}; @@ -61,25 +61,13 @@ pub fn init_consensus_channels() -> (ConsensusSender, ConsensusRe #[derive(Clone, Debug)] pub struct BFTSender { - pub tx_last_election_certificate_ids: mpsc::Sender>>>, pub tx_primary_round: mpsc::Sender<(u64, oneshot::Sender)>, pub tx_primary_certificate: mpsc::Sender<(BatchCertificate, oneshot::Sender>)>, - pub tx_sync_bft_dag_at_bootup: - mpsc::Sender<(Vec<(BatchCertificate, IndexSet>)>, Vec>)>, + pub tx_sync_bft_dag_at_bootup: mpsc::Sender>>, pub tx_sync_bft: mpsc::Sender<(BatchCertificate, oneshot::Sender>)>, } impl BFTSender { - /// Retrieves the last election certificate IDs. - pub async fn get_last_election_certificate_ids(&self) -> Result>> { - // Initialize a callback sender and receiver. - let (callback_sender, callback_receiver) = oneshot::channel(); - // Send the request to get the last election certificate IDs. - self.tx_last_election_certificate_ids.send(callback_sender).await?; - // Await the callback to continue. - Ok(callback_receiver.await?) - } - /// Sends the current round to the BFT. pub async fn send_primary_round_to_bft(&self, current_round: u64) -> Result { // Initialize a callback sender and receiver. @@ -113,36 +101,21 @@ impl BFTSender { #[derive(Debug)] pub struct BFTReceiver { - pub rx_last_election_certificate_ids: mpsc::Receiver>>>, pub rx_primary_round: mpsc::Receiver<(u64, oneshot::Sender)>, pub rx_primary_certificate: mpsc::Receiver<(BatchCertificate, oneshot::Sender>)>, - pub rx_sync_bft_dag_at_bootup: - mpsc::Receiver<(Vec<(BatchCertificate, IndexSet>)>, Vec>)>, + pub rx_sync_bft_dag_at_bootup: mpsc::Receiver>>, pub rx_sync_bft: mpsc::Receiver<(BatchCertificate, oneshot::Sender>)>, } /// Initializes the BFT channels. 
pub fn init_bft_channels() -> (BFTSender, BFTReceiver) { - let (tx_last_election_certificate_ids, rx_last_election_certificate_ids) = mpsc::channel(MAX_CHANNEL_SIZE); let (tx_primary_round, rx_primary_round) = mpsc::channel(MAX_CHANNEL_SIZE); let (tx_primary_certificate, rx_primary_certificate) = mpsc::channel(MAX_CHANNEL_SIZE); let (tx_sync_bft_dag_at_bootup, rx_sync_bft_dag_at_bootup) = mpsc::channel(MAX_CHANNEL_SIZE); let (tx_sync_bft, rx_sync_bft) = mpsc::channel(MAX_CHANNEL_SIZE); - let sender = BFTSender { - tx_last_election_certificate_ids, - tx_primary_round, - tx_primary_certificate, - tx_sync_bft_dag_at_bootup, - tx_sync_bft, - }; - let receiver = BFTReceiver { - rx_last_election_certificate_ids, - rx_primary_round, - rx_primary_certificate, - rx_sync_bft_dag_at_bootup, - rx_sync_bft, - }; + let sender = BFTSender { tx_primary_round, tx_primary_certificate, tx_sync_bft_dag_at_bootup, tx_sync_bft }; + let receiver = BFTReceiver { rx_primary_round, rx_primary_certificate, rx_sync_bft_dag_at_bootup, rx_sync_bft }; (sender, receiver) } @@ -152,10 +125,8 @@ pub struct PrimarySender { pub tx_batch_propose: mpsc::Sender<(SocketAddr, BatchPropose)>, pub tx_batch_signature: mpsc::Sender<(SocketAddr, BatchSignature)>, pub tx_batch_certified: mpsc::Sender<(SocketAddr, Data>)>, - pub tx_primary_ping: - mpsc::Sender<(SocketAddr, Data>, IndexMap, Data>>)>, - pub tx_unconfirmed_solution: - mpsc::Sender<(PuzzleCommitment, Data>, oneshot::Sender>)>, + pub tx_primary_ping: mpsc::Sender<(SocketAddr, Data>)>, + pub tx_unconfirmed_solution: mpsc::Sender<(SolutionID, Data>, oneshot::Sender>)>, pub tx_unconfirmed_transaction: mpsc::Sender<(N::TransactionID, Data>, oneshot::Sender>)>, } @@ -163,8 +134,8 @@ impl PrimarySender { /// Sends the unconfirmed solution to the primary. pub async fn send_unconfirmed_solution( &self, - solution_id: PuzzleCommitment, - solution: Data>, + solution_id: SolutionID, + solution: Data>, ) -> Result<()> { // Initialize a callback sender and receiver. 
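The senders above all follow the same request pattern: each message carries a oneshot sender, and the caller awaits that callback for the outcome (the fragment that follows completes `send_unconfirmed_solution` this way). A self-contained sketch of the pattern, assuming tokio:

use tokio::sync::{mpsc, oneshot};

#[tokio::main]
async fn main() {
    // A request channel whose messages carry a oneshot sender for the reply.
    let (tx, mut rx) = mpsc::channel::<(u64, oneshot::Sender<bool>)>(16);

    // The handler side answers each request through its callback.
    tokio::spawn(async move {
        while let Some((round, callback)) = rx.recv().await {
            callback.send(round % 2 == 0).ok();
        }
    });

    // The caller sends a request, then awaits the callback for the result.
    let (callback_sender, callback_receiver) = oneshot::channel();
    tx.send((2, callback_sender)).await.unwrap();
    assert!(callback_receiver.await.unwrap());
}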
let (callback_sender, callback_receiver) = oneshot::channel(); @@ -194,10 +165,8 @@ pub struct PrimaryReceiver { pub rx_batch_propose: mpsc::Receiver<(SocketAddr, BatchPropose)>, pub rx_batch_signature: mpsc::Receiver<(SocketAddr, BatchSignature)>, pub rx_batch_certified: mpsc::Receiver<(SocketAddr, Data>)>, - pub rx_primary_ping: - mpsc::Receiver<(SocketAddr, Data>, IndexMap, Data>>)>, - pub rx_unconfirmed_solution: - mpsc::Receiver<(PuzzleCommitment, Data>, oneshot::Sender>)>, + pub rx_primary_ping: mpsc::Receiver<(SocketAddr, Data>)>, + pub rx_unconfirmed_solution: mpsc::Receiver<(SolutionID, Data>, oneshot::Sender>)>, pub rx_unconfirmed_transaction: mpsc::Receiver<(N::TransactionID, Data>, oneshot::Sender>)>, } diff --git a/node/bft/src/helpers/dag.rs b/node/bft/src/helpers/dag.rs index a97b31689a..2d64ded988 100644 --- a/node/bft/src/helpers/dag.rs +++ b/node/bft/src/helpers/dag.rs @@ -152,13 +152,13 @@ pub(crate) mod test_helpers { mod tests { use super::*; use snarkvm::{ - prelude::{narwhal::batch_certificate::test_helpers::sample_batch_certificate_for_round, Testnet3}, + prelude::{narwhal::batch_certificate::test_helpers::sample_batch_certificate_for_round, MainnetV0}, utilities::TestRng, }; #[test] fn test_dag_empty() { - let dag = DAG::::new(); + let dag = DAG::::new(); assert_eq!(dag.get_certificates_for_round(0), None); assert_eq!(dag.last_committed_round(), 0); @@ -167,7 +167,7 @@ mod tests { #[test] fn test_dag_insert() { let rng = &mut TestRng::default(); - let mut dag = DAG::::new(); + let mut dag = DAG::::new(); const ROUND: u64 = 2; @@ -187,7 +187,7 @@ mod tests { #[test] fn test_dag_commit() { let rng = &mut TestRng::default(); - let mut dag = DAG::::new(); + let mut dag = DAG::::new(); // Sample a certificate for round 2 and 3 with the same author. let certificate_2 = sample_batch_certificate_for_round(2, &mut TestRng::fixed(123456789)); @@ -234,7 +234,7 @@ mod tests { #[test] fn test_is_recently_committed() { - let mut dag = DAG::::new(); + let mut dag = DAG::::new(); // Sample a certificate for round 2, 3, and 4 with the same author. 
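// All of the channels above share one shape: the requester packs a `oneshot`
// sender alongside the payload and awaits the reply, while the consumer answers
// through it. A hedged sketch of the consuming side for unconfirmed solutions;
// the handler body is an assumption, only the channel types come from this diff.
async fn handle_unconfirmed_solutions<N: Network>(mut receiver: PrimaryReceiver<N>) {
    while let Some((solution_id, solution, callback)) = receiver.rx_unconfirmed_solution.recv().await {
        // Placeholder validation; a real node would verify the solution against the puzzle.
        let result: Result<()> = Ok(());
        let _ = (solution_id, solution);
        // Completing the oneshot unblocks the matching `send_unconfirmed_solution` caller.
        callback.send(result).ok();
    }
}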
let certificate_2 = sample_batch_certificate_for_round(2, &mut TestRng::fixed(123456789)); diff --git a/node/bft/src/helpers/mod.rs b/node/bft/src/helpers/mod.rs index f690aa4cd9..c37950839d 100644 --- a/node/bft/src/helpers/mod.rs +++ b/node/bft/src/helpers/mod.rs @@ -30,12 +30,18 @@ pub use pending::*; pub mod proposal; pub use proposal::*; +pub mod proposal_cache; +pub use proposal_cache::*; + pub mod ready; pub use ready::*; pub mod resolver; pub use resolver::*; +pub mod signed_proposals; +pub use signed_proposals::*; + pub mod storage; pub use storage::*; diff --git a/node/bft/src/helpers/partition.rs b/node/bft/src/helpers/partition.rs index 4281e749f3..fd64cc0932 100644 --- a/node/bft/src/helpers/partition.rs +++ b/node/bft/src/helpers/partition.rs @@ -76,9 +76,9 @@ pub fn assign_to_workers( #[cfg(test)] mod tests { use super::*; - use snarkvm::prelude::coinbase::PuzzleCommitment; + use snarkvm::prelude::puzzle::SolutionID; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; #[test] fn test_assign_to_worker() { @@ -90,8 +90,8 @@ mod tests { ]); let hash = sha256d_to_u128(data); assert_eq!(hash, 274520597840828436951879875061540363633u128); - let transmission_id: TransmissionID = TransmissionID::Solution(PuzzleCommitment::default()); + let transmission_id: TransmissionID = TransmissionID::Solution(SolutionID::from(123456789)); let worker_id = assign_to_worker(transmission_id, 5).unwrap(); - assert_eq!(worker_id, 4); + assert_eq!(worker_id, 2); } } diff --git a/node/bft/src/helpers/pending.rs b/node/bft/src/helpers/pending.rs index 7ffbda469b..cf68a39e54 100644 --- a/node/bft/src/helpers/pending.rs +++ b/node/bft/src/helpers/pending.rs @@ -12,22 +12,44 @@ // See the License for the specific language governing permissions and // limitations under the License. -use parking_lot::{Mutex, RwLock}; +use crate::MAX_FETCH_TIMEOUT_IN_MS; +use snarkos_node_bft_ledger_service::LedgerService; +use snarkvm::{console::network::Network, ledger::committee::Committee}; + +use parking_lot::RwLock; use std::{ collections::{HashMap, HashSet}, hash::Hash, net::SocketAddr, + sync::Arc, }; +use time::OffsetDateTime; use tokio::sync::oneshot; +/// The maximum number of seconds to wait before expiring a callback. +/// We ensure that we don't truncate `MAX_FETCH_TIMEOUT_IN_MS` when converting to seconds. +pub(crate) const CALLBACK_EXPIRATION_IN_SECS: i64 = MAX_FETCH_TIMEOUT_IN_MS.div_ceil(1000) as i64; + +/// Returns the maximum number of redundant requests for the number of validators in the specified round. +pub fn max_redundant_requests(ledger: Arc>, round: u64) -> usize { + // Determine the number of validators in the committee lookback for the given round. + let num_validators = ledger + .get_committee_lookback_for_round(round) + .map(|committee| committee.num_members()) + .ok() + .unwrap_or(Committee::::MAX_COMMITTEE_SIZE as usize); + + // Note: It is adequate to set this value to the availability threshold, + // as with high probability one will respond honestly (in the best and worst case + // with stake spread across the validators evenly and unevenly, respectively). + 1 + num_validators.saturating_div(3) +} + #[derive(Debug)] pub struct Pending { - /// The map of pending `items` to `peer IPs` that have the item. - pending: RwLock>>, - /// TODO (howardwu): Expire callbacks that have not been called after a certain amount of time, - /// or clear the callbacks that are older than a certain round. - /// The optional callback queue. 
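// The bound in `max_redundant_requests` above is the availability threshold
// f + 1 with f = floor(n / 3), and `CALLBACK_EXPIRATION_IN_SECS` rounds the
// 7500 ms fetch timeout (3 x MAX_BATCH_DELAY_IN_MS) up to 8 s. A self-contained
// sketch of both computations; the committee sizes are illustrative only.
fn max_redundant_requests_for(num_validators: usize) -> usize {
    // The availability threshold: f + 1, where f = floor(n / 3).
    1 + num_validators.saturating_div(3)
}

#[test]
fn redundancy_and_expiry_examples() {
    assert_eq!(max_redundant_requests_for(4), 2); // f = 1, so 2 requests suffice.
    assert_eq!(max_redundant_requests_for(10), 4); // f = 3.
    // The expiry never truncates the timeout: ceil(7500 / 1000) = 8 seconds.
    assert_eq!((3u64 * 2500).div_ceil(1000), 8);
}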
- callbacks: Mutex>>>, + /// The map of pending `items` to a map of `peer IPs` and their optional `callback` queue. + /// Each callback has a timeout and a flag indicating if it is associated with a sent request. + pending: RwLock, i64, bool)>>>>, } impl Default for Pending { @@ -40,7 +62,7 @@ impl Default for Pending Pending { /// Initializes a new instance of the pending queue. pub fn new() -> Self { - Self { pending: Default::default(), callbacks: Default::default() } + Self { pending: Default::default() } } /// Returns `true` if the pending queue is empty. @@ -60,12 +82,35 @@ impl Pending { /// Returns `true` if the pending queue contains the specified `item` for the specified `peer IP`. pub fn contains_peer(&self, item: impl Into, peer_ip: SocketAddr) -> bool { - self.pending.read().get(&item.into()).map_or(false, |peer_ips| peer_ips.contains(&peer_ip)) + self.pending.read().get(&item.into()).map_or(false, |peer_ips| peer_ips.contains_key(&peer_ip)) } /// Returns the peer IPs for the specified `item`. - pub fn get(&self, item: impl Into) -> Option> { - self.pending.read().get(&item.into()).cloned() + pub fn get_peers(&self, item: impl Into) -> Option> { + self.pending.read().get(&item.into()).map(|map| map.keys().cloned().collect()) + } + + /// Returns the number of pending callbacks for the specified `item`. + pub fn num_callbacks(&self, item: impl Into) -> usize { + let item = item.into(); + let now = OffsetDateTime::now_utc().unix_timestamp(); + // Clear the callbacks that have expired. + self.clear_expired_callbacks_for_item(now, item); + // Return the number of live callbacks. + self.pending.read().get(&item).map_or(0, |peers| peers.values().fold(0, |acc, v| acc.saturating_add(v.len()))) + } + + /// Returns the number of pending sent requests for the specified `item`. + pub fn num_sent_requests(&self, item: impl Into) -> usize { + let item = item.into(); + let now = OffsetDateTime::now_utc().unix_timestamp(); + // Clear the callbacks that have expired. + self.clear_expired_callbacks_for_item(now, item); + // Return the number of live sent requests. + self.pending + .read() + .get(&item) + .map_or(0, |peers| peers.values().flatten().filter(|(_, _, request_sent)| *request_sent).count()) } /// Inserts the specified `item` and `peer IP` to the pending queue, @@ -73,14 +118,39 @@ impl Pending { /// /// In addition, an optional `callback` may be provided, that is triggered upon removal. /// Note: The callback, if provided, is **always** inserted into the callback queue. - pub fn insert(&self, item: impl Into, peer_ip: SocketAddr, callback: Option>) -> bool { + pub fn insert( + &self, + item: impl Into, + peer_ip: SocketAddr, + callback: Option<(oneshot::Sender, bool)>, + ) -> bool { let item = item.into(); - // Insert the peer IP into the pending queue. - let result = self.pending.write().entry(item).or_default().insert(peer_ip); - // If a callback is provided, insert it into the callback queue. - if let Some(callback) = callback { - self.callbacks.lock().entry(item).or_default().push(callback); - } + let now = OffsetDateTime::now_utc().unix_timestamp(); + // Insert the peer IP and optional callback into the pending queue. + let result = { + // Acquire the pending lock. + let mut pending = self.pending.write(); + + // Insert a peer into the pending queue. + let entry = pending.entry(item).or_default(); + + // Check if the peer IP is already present in the entry. + let is_new_peer = !entry.contains_key(&peer_ip); + + // Get the entry for the peer IP.
+ let peer_entry = entry.entry(peer_ip).or_default(); + + // If a callback is provided, insert it into the callback queue. + if let Some((callback, request_sent)) = callback { + peer_entry.push((callback, now, request_sent)); + } + + is_new_peer + }; + + // Clear the callbacks that have expired. + self.clear_expired_callbacks_for_item(now, item); + // Return the result. result } @@ -90,19 +160,68 @@ impl Pending { /// If the `item` does not exist, `None` is returned. pub fn remove(&self, item: impl Into, callback_value: Option) -> Option> { let item = item.into(); - // Remove the item from the pending queue. - let result = self.pending.write().remove(&item); - // Remove the callback for the item, and process any remaining callbacks. - if let Some(callbacks) = self.callbacks.lock().remove(&item) { - if let Some(callback_value) = callback_value { - // Send a notification to the callback. - for callback in callbacks { - callback.send(callback_value.clone()).ok(); + // Remove the item from the pending queue and process any remaining callbacks. + match self.pending.write().remove(&item) { + Some(callbacks) => { + // Get the peer IPs. + let peer_ips = callbacks.keys().copied().collect(); + // Process the callbacks. + if let Some(callback_value) = callback_value { + // Send a notification to the callback. + for (callback, _, _) in callbacks.into_values().flat_map(|callbacks| callbacks.into_iter()) { + callback.send(callback_value.clone()).ok(); + } } + // Return the peer IPs. + Some(peer_ips) } + None => None, } - // Return the result. - result + } + + /// Removes the callbacks for the specified `item` that have expired. + pub fn clear_expired_callbacks_for_item(&self, now: i64, item: impl Into) { + let item = item.into(); + + // Acquire the pending lock. + let mut pending = self.pending.write(); + + // Clear the callbacks that have expired. + if let Some(peer_map) = pending.get_mut(&item) { + // Iterate over each peer IP for the item and filter out expired callbacks. + for (_, callbacks) in peer_map.iter_mut() { + callbacks.retain(|(_, timestamp, _)| now - *timestamp <= CALLBACK_EXPIRATION_IN_SECS); + } + + // Remove peer IPs that no longer have any callbacks. + peer_map.retain(|_, callbacks| !callbacks.is_empty()); + + // If there are no more remaining callbacks for the item across all peer IPs, remove the item from pending. + if peer_map.is_empty() { + pending.remove(&item); + } + } + } + + /// Removes the callbacks for all items that have expired. + pub fn clear_expired_callbacks(&self) { + let now = OffsetDateTime::now_utc().unix_timestamp(); + // Acquire the pending lock once for write access. + let mut pending = self.pending.write(); + + // Iterate over all items in pending to modify the data structure in-place. + pending.retain(|_, peer_map| { + // Iterate over each peer IP for the item and filter out expired callbacks. + for (_, callbacks) in peer_map.iter_mut() { + callbacks.retain(|(_, timestamp, _)| now - *timestamp <= CALLBACK_EXPIRATION_IN_SECS); + } + + // Remove peer IPs that no longer have any callbacks. + peer_map.retain(|_, callbacks| !callbacks.is_empty()); + + // Keep the item in the pending map only if there are callbacks left.
+ !peer_map.is_empty() + }); } } @@ -110,11 +229,15 @@ impl Pending { mod tests { use super::*; use snarkvm::{ - ledger::{coinbase::PuzzleCommitment, narwhal::TransmissionID}, + ledger::narwhal::TransmissionID, prelude::{Rng, TestRng}, }; - type CurrentNetwork = snarkvm::prelude::Testnet3; + use std::{thread, time::Duration}; + + type CurrentNetwork = snarkvm::prelude::MainnetV0; + + const ITERATIONS: usize = 100; #[test] fn test_pending() { @@ -127,27 +250,32 @@ mod tests { assert!(pending.is_empty()); assert_eq!(pending.len(), 0); - // Initialize the commitments. - let commitment_1 = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); - let commitment_2 = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); - let commitment_3 = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); + // Initialize the solution IDs. + let solution_id_1 = TransmissionID::Solution(rng.gen::().into()); + let solution_id_2 = TransmissionID::Solution(rng.gen::().into()); + let solution_id_3 = TransmissionID::Solution(rng.gen::().into()); // Initialize the SocketAddrs. let addr_1 = SocketAddr::from(([127, 0, 0, 1], 1234)); let addr_2 = SocketAddr::from(([127, 0, 0, 1], 2345)); let addr_3 = SocketAddr::from(([127, 0, 0, 1], 3456)); - // Insert the commitments. - assert!(pending.insert(commitment_1, addr_1, None)); - assert!(pending.insert(commitment_2, addr_2, None)); - assert!(pending.insert(commitment_3, addr_3, None)); + // Initialize the callbacks. + let (callback_sender_1, _) = oneshot::channel(); + let (callback_sender_2, _) = oneshot::channel(); + let (callback_sender_3, _) = oneshot::channel(); + + // Insert the solution IDs. + assert!(pending.insert(solution_id_1, addr_1, Some((callback_sender_1, true)))); + assert!(pending.insert(solution_id_2, addr_2, Some((callback_sender_2, true)))); + assert!(pending.insert(solution_id_3, addr_3, Some((callback_sender_3, true)))); // Check the number of SocketAddrs. assert_eq!(pending.len(), 3); assert!(!pending.is_empty()); // Check the items. - let ids = [commitment_1, commitment_2, commitment_3]; + let ids = [solution_id_1, solution_id_2, solution_id_3]; let peers = [addr_1, addr_2, addr_3]; for i in 0..3 { @@ -155,24 +283,151 @@ mod tests { assert!(pending.contains(id)); assert!(pending.contains_peer(id, peers[i])); } - let unknown_id = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); + let unknown_id = TransmissionID::Solution(rng.gen::().into()); assert!(!pending.contains(unknown_id)); // Check get. - assert_eq!(pending.get(commitment_1), Some(HashSet::from([addr_1]))); - assert_eq!(pending.get(commitment_2), Some(HashSet::from([addr_2]))); - assert_eq!(pending.get(commitment_3), Some(HashSet::from([addr_3]))); - assert_eq!(pending.get(unknown_id), None); + assert_eq!(pending.get_peers(solution_id_1), Some(HashSet::from([addr_1]))); + assert_eq!(pending.get_peers(solution_id_2), Some(HashSet::from([addr_2]))); + assert_eq!(pending.get_peers(solution_id_3), Some(HashSet::from([addr_3]))); + assert_eq!(pending.get_peers(unknown_id), None); // Check remove. - assert!(pending.remove(commitment_1, None).is_some()); - assert!(pending.remove(commitment_2, None).is_some()); - assert!(pending.remove(commitment_3, None).is_some()); + assert!(pending.remove(solution_id_1, None).is_some()); + assert!(pending.remove(solution_id_2, None).is_some()); + assert!(pending.remove(solution_id_3, None).is_some()); assert!(pending.remove(unknown_id, None).is_none()); // Check empty again. 
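// Two clearing paths exist above: `clear_expired_callbacks_for_item` runs
// lazily on inserts and counter reads, while `clear_expired_callbacks` suits a
// periodic sweep. A hedged sketch of such a sweep; the task, interval, and
// trait bounds are assumptions, not part of this change.
async fn sweep_expired<T, V>(pending: std::sync::Arc<Pending<T, V>>)
where
    T: Copy + Eq + std::hash::Hash,
    V: Clone,
{
    let mut ticker = tokio::time::interval(tokio::time::Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS));
    loop {
        ticker.tick().await;
        // Drop callbacks older than CALLBACK_EXPIRATION_IN_SECS and prune empty entries.
        pending.clear_expired_callbacks();
    }
}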
assert!(pending.is_empty()); } + + #[test] + fn test_expired_callbacks() { + let rng = &mut TestRng::default(); + + // Initialize the pending queue. + let pending = Pending::, ()>::new(); + + // Check initially empty. + assert!(pending.is_empty()); + assert_eq!(pending.len(), 0); + + // Initialize the solution ID. + let solution_id_1 = TransmissionID::Solution(rng.gen::().into()); + + // Initialize the SocketAddrs. + let addr_1 = SocketAddr::from(([127, 0, 0, 1], 1234)); + let addr_2 = SocketAddr::from(([127, 0, 0, 1], 2345)); + let addr_3 = SocketAddr::from(([127, 0, 0, 1], 3456)); + + // Initialize the callbacks. + let (callback_sender_1, _) = oneshot::channel(); + let (callback_sender_2, _) = oneshot::channel(); + let (callback_sender_3, _) = oneshot::channel(); + + // Insert the solution ID. + assert!(pending.insert(solution_id_1, addr_1, Some((callback_sender_1, true)))); + assert!(pending.insert(solution_id_1, addr_2, Some((callback_sender_2, true)))); + + // Sleep for a few seconds. + thread::sleep(Duration::from_secs(CALLBACK_EXPIRATION_IN_SECS as u64 - 1)); + + assert!(pending.insert(solution_id_1, addr_3, Some((callback_sender_3, true)))); + + // Check that the number of callbacks has not changed. + assert_eq!(pending.num_callbacks(solution_id_1), 3); + + // Wait for 2 seconds. + thread::sleep(Duration::from_secs(2)); + + // Ensure that the expired callbacks have been removed. + assert_eq!(pending.num_callbacks(solution_id_1), 1); + + // Wait for `CALLBACK_EXPIRATION_IN_SECS` seconds. + thread::sleep(Duration::from_secs(CALLBACK_EXPIRATION_IN_SECS as u64)); + + // Ensure that the expired callbacks have been removed. + assert_eq!(pending.num_callbacks(solution_id_1), 0); + } + + #[test] + fn test_num_sent_requests() { + let rng = &mut TestRng::default(); + + // Initialize the pending queue. + let pending = Pending::, ()>::new(); + + for _ in 0..ITERATIONS { + // Generate a solution ID. + let solution_id = TransmissionID::Solution(rng.gen::().into()); + // Check if the number of sent requests is correct. + let mut expected_num_sent_requests = 0; + for i in 0..ITERATIONS { + // Generate a peer address. + let addr = SocketAddr::from(([127, 0, 0, 1], i as u16)); + // Initialize a callback. + let (callback_sender, _) = oneshot::channel(); + // Randomly determine if the callback is associated with a sent request. + let is_sent_request = rng.gen(); + // Increment the expected number of sent requests. + if is_sent_request { + expected_num_sent_requests += 1; + } + // Insert the solution ID. + assert!(pending.insert(solution_id, addr, Some((callback_sender, is_sent_request)))); + } + // Ensure that the number of sent requests is correct. + assert_eq!(pending.num_sent_requests(solution_id), expected_num_sent_requests); + } + } + + #[test] + fn test_expired_items() { + let rng = &mut TestRng::default(); + + // Initialize the pending queue. + let pending = Pending::, ()>::new(); + + // Check initially empty. + assert!(pending.is_empty()); + assert_eq!(pending.len(), 0); + + // Initialize the solution IDs. + let solution_id_1 = TransmissionID::Solution(rng.gen::().into()); + let solution_id_2 = TransmissionID::Solution(rng.gen::().into()); + + // Initialize the SocketAddrs. + let addr_1 = SocketAddr::from(([127, 0, 0, 1], 1234)); + let addr_2 = SocketAddr::from(([127, 0, 0, 1], 2345)); + let addr_3 = SocketAddr::from(([127, 0, 0, 1], 3456)); + + // Initialize the callbacks.
+ let (callback_sender_1, _) = oneshot::channel(); + let (callback_sender_2, _) = oneshot::channel(); + let (callback_sender_3, _) = oneshot::channel(); + + // Insert the solution IDs. + assert!(pending.insert(solution_id_1, addr_1, Some((callback_sender_1, true)))); + assert!(pending.insert(solution_id_1, addr_2, Some((callback_sender_2, true)))); + assert!(pending.insert(solution_id_2, addr_3, Some((callback_sender_3, true)))); + + // Ensure that the items have not been expired yet. + assert_eq!(pending.num_callbacks(solution_id_1), 2); + assert_eq!(pending.num_callbacks(solution_id_2), 1); + assert_eq!(pending.len(), 2); + + // Wait for `CALLBACK_EXPIRATION_IN_SECS + 1` seconds. + thread::sleep(Duration::from_secs(CALLBACK_EXPIRATION_IN_SECS as u64 + 1)); + + // Expire the pending callbacks. + pending.clear_expired_callbacks(); + + // Ensure that the items have been expired. + assert_eq!(pending.num_callbacks(solution_id_1), 0); + assert_eq!(pending.num_callbacks(solution_id_2), 0); + assert!(pending.is_empty()); + } } #[cfg(test)] @@ -196,7 +451,11 @@ mod prop_tests { pub fn to_pending(&self) -> Pending { let pending = Pending::::new(); for i in 0..self.count { - pending.insert(Item { id: i }, SocketAddr::from(([127, 0, 0, 1], i as u16)), None); + pending.insert( + Item { id: i }, + SocketAddr::from(([127, 0, 0, 1], i as u16)), + Some((oneshot::channel().0, true)), + ); } pending } @@ -208,13 +467,13 @@ mod prop_tests { assert_eq!(pending.len(), input.count); assert!(!pending.is_empty()); assert!(!pending.contains(Item { id: input.count + 1 })); - assert_eq!(pending.get(Item { id: input.count + 1 }), None); + assert_eq!(pending.get_peers(Item { id: input.count + 1 }), None); assert!(pending.remove(Item { id: input.count + 1 }, None).is_none()); for i in 0..input.count { assert!(pending.contains(Item { id: i })); let peer_ip = SocketAddr::from(([127, 0, 0, 1], i as u16)); assert!(pending.contains_peer(Item { id: i }, peer_ip)); - assert_eq!(pending.get(Item { id: i }), Some(HashSet::from([peer_ip]))); + assert_eq!(pending.get_peers(Item { id: i }), Some(HashSet::from([peer_ip]))); assert!(pending.remove(Item { id: i }, None).is_some()); } assert!(pending.is_empty()); diff --git a/node/bft/src/helpers/proposal.rs b/node/bft/src/helpers/proposal.rs index 8e904435a2..aefb1bd753 100644 --- a/node/bft/src/helpers/proposal.rs +++ b/node/bft/src/helpers/proposal.rs @@ -22,12 +22,13 @@ use snarkvm::{ committee::Committee, narwhal::{BatchCertificate, BatchHeader, Transmission, TransmissionID}, }, - prelude::{bail, ensure, Itertools, Result}, + prelude::{bail, ensure, error, FromBytes, IoResult, Itertools, Read, Result, ToBytes, Write}, }; use indexmap::{IndexMap, IndexSet}; use std::collections::HashSet; +#[derive(Debug, PartialEq, Eq)] pub struct Proposal { /// The proposed batch header. batch_header: BatchHeader, @@ -48,8 +49,6 @@ impl Proposal { ensure!(batch_header.round() >= committee.starting_round(), "Batch round must be >= the committee round"); // Ensure the batch author is a member of the committee. ensure!(committee.is_committee_member(batch_header.author()), "The batch author is not a committee member"); - // Ensure the transmissions are not empty. - ensure!(!transmissions.is_empty(), "The transmissions are empty"); // Ensure the transmission IDs match in the batch header and transmissions.
ensure!( batch_header.transmission_ids().len() == transmissions.len(), @@ -169,6 +168,94 @@ impl Proposal { } } +impl ToBytes for Proposal { + fn write_le(&self, mut writer: W) -> IoResult<()> { + // Write the batch header. + self.batch_header.write_le(&mut writer)?; + // Write the number of transmissions. + u32::try_from(self.transmissions.len()).map_err(error)?.write_le(&mut writer)?; + // Write the transmissions. + for (transmission_id, transmission) in &self.transmissions { + transmission_id.write_le(&mut writer)?; + transmission.write_le(&mut writer)?; + } + // Write the number of signatures. + u32::try_from(self.signatures.len()).map_err(error)?.write_le(&mut writer)?; + // Write the signatures. + for signature in &self.signatures { + signature.write_le(&mut writer)?; + } + Ok(()) + } +} + +impl FromBytes for Proposal { + fn read_le(mut reader: R) -> IoResult { + // Read the batch header. + let batch_header = FromBytes::read_le(&mut reader)?; + // Read the number of transmissions. + let num_transmissions = u32::read_le(&mut reader)?; + // Ensure the number of transmissions is within bounds (this is an early safety check). + if num_transmissions as usize > BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH { + return Err(error("Invalid number of transmissions in the proposal")); + } + // Read the transmissions. + let mut transmissions = IndexMap::default(); + for _ in 0..num_transmissions { + let transmission_id = FromBytes::read_le(&mut reader)?; + let transmission = FromBytes::read_le(&mut reader)?; + transmissions.insert(transmission_id, transmission); + } + // Read the number of signatures. + let num_signatures = u32::read_le(&mut reader)?; + // Ensure the number of signatures is within bounds (this is an early safety check). + if num_signatures as usize > Committee::::MAX_COMMITTEE_SIZE as usize { + return Err(error("Invalid number of signatures in the proposal")); + } + // Read the signatures. + let mut signatures = IndexSet::default(); + for _ in 0..num_signatures { + signatures.insert(FromBytes::read_le(&mut reader)?); + } + + Ok(Self { batch_header, transmissions, signatures }) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::helpers::storage::tests::sample_transmissions; + use snarkvm::{console::network::MainnetV0, utilities::TestRng}; + + type CurrentNetwork = MainnetV0; + + const ITERATIONS: usize = 100; + + pub(crate) fn sample_proposal(rng: &mut TestRng) -> Proposal { + let certificate = snarkvm::ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificate(rng); + let (_, transmissions) = sample_transmissions(&certificate, rng); + + let transmissions = transmissions.into_iter().map(|(id, (t, _))| (id, t)).collect::>(); + let batch_header = certificate.batch_header().clone(); + let signatures = certificate.signatures().copied().collect(); + + Proposal { batch_header, transmissions, signatures } + } + + #[test] + fn test_bytes() { + let rng = &mut TestRng::default(); + + for _ in 0..ITERATIONS { + let expected = sample_proposal(rng); + // Check the byte representation. 
+ let expected_bytes = expected.to_bytes_le().unwrap(); + assert_eq!(expected, Proposal::read_le(&expected_bytes[..]).unwrap()); + } + } +} + #[cfg(test)] mod prop_tests { use crate::helpers::{ @@ -205,9 +292,9 @@ mod prop_tests { &signer.private_key, committee.starting_round(), now(), + committee.id(), transmission_map.keys().cloned().collect(), Default::default(), - Default::default(), &mut rng, ) .unwrap(); diff --git a/node/bft/src/helpers/proposal_cache.rs b/node/bft/src/helpers/proposal_cache.rs new file mode 100644 index 0000000000..cbc6cc18a1 --- /dev/null +++ b/node/bft/src/helpers/proposal_cache.rs @@ -0,0 +1,224 @@ +// Copyright (C) 2019-2023 Aleo Systems Inc. +// This file is part of the snarkOS library. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::helpers::{Proposal, SignedProposals}; + +use snarkvm::{ + console::{account::Address, network::Network, program::SUBDAG_CERTIFICATES_DEPTH}, + ledger::narwhal::BatchCertificate, + prelude::{anyhow, bail, error, FromBytes, IoResult, Read, Result, ToBytes, Write}, +}; + +use aleo_std::{aleo_ledger_dir, StorageMode}; +use indexmap::IndexSet; +use std::{fs, path::PathBuf}; + +/// Returns the path where a proposal cache file may be stored. +pub fn proposal_cache_path(network: u16, dev: Option) -> PathBuf { + const PROPOSAL_CACHE_FILE_NAME: &str = "current-proposal-cache"; + + // Obtain the path to the ledger. + let mut path = aleo_ledger_dir(network, StorageMode::from(dev)); + // Go to the folder right above the ledger. + path.pop(); + // Append the proposal cache's file name. + match dev { + Some(id) => path.push(&format!(".{PROPOSAL_CACHE_FILE_NAME}-{}-{}", network, id)), + None => path.push(&format!("{PROPOSAL_CACHE_FILE_NAME}-{}", network)), + } + + path +} + +/// A helper type for the cache of the proposal and the signed proposals. +#[derive(Debug, PartialEq, Eq)] +pub struct ProposalCache { + /// The latest round this node was on prior to the reboot. + latest_round: u64, + /// The latest proposal this node has created. + proposal: Option>, + /// The signed proposals this node has received. + signed_proposals: SignedProposals, + /// The pending certificates in storage that have not been included in the ledger. + pending_certificates: IndexSet>, +} + +impl ProposalCache { + /// Initializes a new instance of the proposal cache. + pub fn new( + latest_round: u64, + proposal: Option>, + signed_proposals: SignedProposals, + pending_certificates: IndexSet>, + ) -> Self { + Self { latest_round, proposal, signed_proposals, pending_certificates } + } + + /// Ensure that the proposal and every signed proposal are associated with the `expected_signer`. + pub fn is_valid(&self, expected_signer: Address) -> bool { + self.proposal + .as_ref() + .map(|proposal| { + proposal.batch_header().author() == expected_signer && self.latest_round == proposal.round() + }) + .unwrap_or(true) + && self.signed_proposals.is_valid(expected_signer) + } + + /// Returns `true` if a proposal cache exists for the given network and `dev`.
+ pub fn exists(dev: Option) -> bool { + proposal_cache_path(N::ID, dev).exists() + } + + /// Load the proposal cache from the file system and ensure that the proposal cache is valid. + pub fn load(expected_signer: Address, dev: Option) -> Result { + // Construct the proposal cache file system path. + let path = proposal_cache_path(N::ID, dev); + + // Deserialize the proposal cache from the file system. + let proposal_cache = match fs::read(&path) { + Ok(bytes) => match Self::from_bytes_le(&bytes) { + Ok(proposal_cache) => proposal_cache, + Err(_) => bail!("Couldn't deserialize the proposal stored at {}", path.display()), + }, + Err(_) => bail!("Couldn't read the proposal stored at {}", path.display()), + }; + + // Ensure the proposal cache is valid. + if !proposal_cache.is_valid(expected_signer) { + bail!("The proposal cache is invalid for the given address {expected_signer}"); + } + + info!("Loaded the proposal cache from {} at round {}", path.display(), proposal_cache.latest_round); + + Ok(proposal_cache) + } + + /// Store the proposal cache to the file system. + pub fn store(&self, dev: Option) -> Result<()> { + let path = proposal_cache_path(N::ID, dev); + info!("Storing the proposal cache to {}...", path.display()); + + // Serialize the proposal cache. + let bytes = self.to_bytes_le()?; + // Store the proposal cache to the file system. + fs::write(&path, bytes) + .map_err(|err| anyhow!("Couldn't write the proposal cache to {} - {err}", path.display()))?; + + Ok(()) + } + + /// Returns the latest round, proposal, signed proposals, and pending certificates. + pub fn into(self) -> (u64, Option>, SignedProposals, IndexSet>) { + (self.latest_round, self.proposal, self.signed_proposals, self.pending_certificates) + } +} + +impl ToBytes for ProposalCache { + fn write_le(&self, mut writer: W) -> IoResult<()> { + // Serialize the `latest_round`. + self.latest_round.write_le(&mut writer)?; + // Serialize the `proposal`. + self.proposal.is_some().write_le(&mut writer)?; + if let Some(proposal) = &self.proposal { + proposal.write_le(&mut writer)?; + } + // Serialize the `signed_proposals`. + self.signed_proposals.write_le(&mut writer)?; + // Write the number of pending certificates. + u32::try_from(self.pending_certificates.len()).map_err(error)?.write_le(&mut writer)?; + // Serialize the pending certificates. + for certificate in &self.pending_certificates { + certificate.write_le(&mut writer)?; + } + + Ok(()) + } +} + +impl FromBytes for ProposalCache { + fn read_le(mut reader: R) -> IoResult { + // Deserialize `latest_round`. + let latest_round = u64::read_le(&mut reader)?; + // Deserialize `proposal`. + let has_proposal: bool = FromBytes::read_le(&mut reader)?; + let proposal = match has_proposal { + true => Some(Proposal::read_le(&mut reader)?), + false => None, + }; + // Deserialize `signed_proposals`. + let signed_proposals = SignedProposals::read_le(&mut reader)?; + // Read the number of pending certificates. + let num_certificates = u32::read_le(&mut reader)?; + // Ensure the number of certificates is within bounds. + if num_certificates > 2u32.saturating_pow(SUBDAG_CERTIFICATES_DEPTH as u32) { + return Err(error(format!( + "Number of certificates ({num_certificates}) exceeds the maximum ({})", + 2u32.saturating_pow(SUBDAG_CERTIFICATES_DEPTH as u32) + ))); + }; + // Deserialize the pending certificates. 
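// The file naming used by `proposal_cache_path` above is easiest to see in
// isolation. A minimal sketch of just the file-name arm (directory resolution
// via `aleo_ledger_dir` elided; the example network and dev IDs are made up):
fn cache_file_name(network: u16, dev: Option<u16>) -> String {
    const PROPOSAL_CACHE_FILE_NAME: &str = "current-proposal-cache";
    match dev {
        // Dev instances get a hidden, id-suffixed file next to the ledger.
        Some(id) => format!(".{PROPOSAL_CACHE_FILE_NAME}-{network}-{id}"),
        None => format!("{PROPOSAL_CACHE_FILE_NAME}-{network}"),
    }
}

#[test]
fn cache_file_name_examples() {
    assert_eq!(cache_file_name(0, Some(1)), ".current-proposal-cache-0-1");
    assert_eq!(cache_file_name(0, None), "current-proposal-cache-0");
}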
+ let pending_certificates = + (0..num_certificates).map(|_| BatchCertificate::read_le(&mut reader)).collect::>>()?; + + Ok(Self::new(latest_round, proposal, signed_proposals, pending_certificates)) + } +} + +impl Default for ProposalCache { + /// Initializes a new instance of the proposal cache. + fn default() -> Self { + Self::new(0, None, Default::default(), Default::default()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::helpers::{proposal::tests::sample_proposal, signed_proposals::tests::sample_signed_proposals}; + use snarkvm::{ + console::{account::PrivateKey, network::MainnetV0}, + ledger::narwhal::batch_certificate::test_helpers::sample_batch_certificates, + utilities::TestRng, + }; + + type CurrentNetwork = MainnetV0; + + const ITERATIONS: usize = 100; + + pub(crate) fn sample_proposal_cache( + signer: &PrivateKey, + rng: &mut TestRng, + ) -> ProposalCache { + let proposal = sample_proposal(rng); + let signed_proposals = sample_signed_proposals(signer, rng); + let round = proposal.round(); + let pending_certificates = sample_batch_certificates(rng); + + ProposalCache::new(round, Some(proposal), signed_proposals, pending_certificates) + } + + #[test] + fn test_bytes() { + let rng = &mut TestRng::default(); + let signer_private_key = PrivateKey::::new(rng).unwrap(); + + for _ in 0..ITERATIONS { + let expected = sample_proposal_cache(&signer_private_key, rng); + // Check the byte representation. + let expected_bytes = expected.to_bytes_le().unwrap(); + assert_eq!(expected, ProposalCache::read_le(&expected_bytes[..]).unwrap()); + } + } +} diff --git a/node/bft/src/helpers/ready.rs b/node/bft/src/helpers/ready.rs index 09bff73dab..6fa581ccdb 100644 --- a/node/bft/src/helpers/ready.rs +++ b/node/bft/src/helpers/ready.rs @@ -16,8 +16,8 @@ use snarkvm::{ console::prelude::*, ledger::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, narwhal::{Data, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, }; @@ -80,7 +80,7 @@ impl Ready { } /// Returns the solutions in the ready queue. - pub fn solutions(&self) -> impl '_ + Iterator, Data>)> { + pub fn solutions(&self) -> impl '_ + Iterator, Data>)> { self.transmissions.read().clone().into_iter().filter_map(|(id, transmission)| match (id, transmission) { (TransmissionID::Solution(id), Transmission::Solution(solution)) => Some((id, solution)), _ => None, @@ -131,11 +131,11 @@ impl Ready { #[cfg(test)] mod tests { use super::*; - use snarkvm::ledger::{coinbase::PuzzleCommitment, narwhal::Data}; + use snarkvm::ledger::narwhal::Data; use ::bytes::Bytes; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; #[test] fn test_ready() { @@ -147,38 +147,38 @@ mod tests { // Initialize the ready queue. let ready = Ready::::new(); - // Initialize the commitments. - let commitment_1 = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); - let commitment_2 = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); - let commitment_3 = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); + // Initialize the solution IDs. + let solution_id_1 = TransmissionID::Solution(rng.gen::().into()); + let solution_id_2 = TransmissionID::Solution(rng.gen::().into()); + let solution_id_3 = TransmissionID::Solution(rng.gen::().into()); // Initialize the solutions.
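// Putting `store`, `exists`, `load`, and `into` together: the intended
// lifecycle is a checkpoint at shutdown and a validated restore at startup.
// A hedged usage sketch; the function and its call site are assumptions.
fn restore_on_reboot<N: Network>(signer: Address<N>, dev: Option<u16>) -> Result<()> {
    if ProposalCache::<N>::exists(dev) {
        // `load` rejects a cache whose proposal or signatures belong to another signer.
        let cache = ProposalCache::<N>::load(signer, dev)?;
        let (latest_round, proposal, signed_proposals, pending_certificates) = cache.into();
        // ... re-seed the BFT with the recovered round, proposal, and certificates.
        let _ = (latest_round, proposal, signed_proposals, pending_certificates);
    }
    Ok(())
}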
let solution_1 = Transmission::Solution(data(rng)); let solution_2 = Transmission::Solution(data(rng)); let solution_3 = Transmission::Solution(data(rng)); - // Insert the commitments. - assert!(ready.insert(commitment_1, solution_1.clone())); - assert!(ready.insert(commitment_2, solution_2.clone())); - assert!(ready.insert(commitment_3, solution_3.clone())); + // Insert the solution IDs. + assert!(ready.insert(solution_id_1, solution_1.clone())); + assert!(ready.insert(solution_id_2, solution_2.clone())); + assert!(ready.insert(solution_id_3, solution_3.clone())); // Check the number of transmissions. assert_eq!(ready.num_transmissions(), 3); // Check the transmission IDs. - let transmission_ids = vec![commitment_1, commitment_2, commitment_3].into_iter().collect::>(); + let transmission_ids = vec![solution_id_1, solution_id_2, solution_id_3].into_iter().collect::>(); assert_eq!(ready.transmission_ids(), transmission_ids); transmission_ids.iter().for_each(|id| assert!(ready.contains(*id))); - // Check that an unknown commitment is not in the ready queue. - let commitment_unknown = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); - assert!(!ready.contains(commitment_unknown)); + // Check that an unknown solution ID is not in the ready queue. + let solution_id_unknown = TransmissionID::Solution(rng.gen::().into()); + assert!(!ready.contains(solution_id_unknown)); // Check the transmissions. - assert_eq!(ready.get(commitment_1), Some(solution_1.clone())); - assert_eq!(ready.get(commitment_2), Some(solution_2.clone())); - assert_eq!(ready.get(commitment_3), Some(solution_3.clone())); - assert_eq!(ready.get(commitment_unknown), None); + assert_eq!(ready.get(solution_id_1), Some(solution_1.clone())); + assert_eq!(ready.get(solution_id_2), Some(solution_2.clone())); + assert_eq!(ready.get(solution_id_3), Some(solution_3.clone())); + assert_eq!(ready.get(solution_id_unknown), None); // Drain the ready queue. let transmissions = ready.drain(3); @@ -191,7 +191,7 @@ mod tests { // Check the transmissions. assert_eq!( transmissions, - vec![(commitment_1, solution_1), (commitment_2, solution_2), (commitment_3, solution_3)] + vec![(solution_id_1, solution_1), (solution_id_2, solution_2), (solution_id_3, solution_3)] .into_iter() .collect::>() ); @@ -210,15 +210,15 @@ mod tests { // Initialize the ready queue. let ready = Ready::::new(); - // Initialize the commitments. - let commitment = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); + // Initialize the solution ID. + let solution_id = TransmissionID::Solution(rng.gen::().into()); - // Initialize the solutions. + // Initialize the solution. let solution = Transmission::Solution(data); - // Insert the commitments. - assert!(ready.insert(commitment, solution.clone())); - assert!(!ready.insert(commitment, solution)); + // Insert the solution ID. + assert!(ready.insert(solution_id, solution.clone())); + assert!(!ready.insert(solution_id, solution)); // Check the number of transmissions. 
assert_eq!(ready.num_transmissions(), 1); diff --git a/node/bft/src/helpers/resolver.rs b/node/bft/src/helpers/resolver.rs index 89a98b3c87..fa0d1d53b2 100644 --- a/node/bft/src/helpers/resolver.rs +++ b/node/bft/src/helpers/resolver.rs @@ -95,7 +95,7 @@ mod tests { use super::*; use snarkvm::{prelude::Rng, utilities::TestRng}; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; #[test] fn test_resolver() { diff --git a/node/bft/src/helpers/signed_proposals.rs b/node/bft/src/helpers/signed_proposals.rs new file mode 100644 index 0000000000..2fd4a7aee6 --- /dev/null +++ b/node/bft/src/helpers/signed_proposals.rs @@ -0,0 +1,154 @@ +// Copyright (C) 2019-2023 Aleo Systems Inc. +// This file is part of the snarkOS library. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use snarkvm::{ + console::{ + account::{Address, Signature}, + network::Network, + types::Field, + }, + prelude::{error, FromBytes, IoResult, Read, ToBytes, Write}, +}; + +use std::{collections::HashMap, ops::Deref}; + +/// The recently-signed batch proposals. +/// A map of `address` to (`round`, `batch ID`, `signature`). +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct SignedProposals(pub HashMap, (u64, Field, Signature)>); + +impl SignedProposals { + /// Ensure that every signed proposal is associated with the `expected_signer`. + pub fn is_valid(&self, expected_signer: Address) -> bool { + self.0.iter().all(|(_, (_, _, signature))| signature.to_address() == expected_signer) + } +} + +impl ToBytes for SignedProposals { + fn write_le(&self, mut writer: W) -> IoResult<()> { + // Write the number of signed proposals. + u32::try_from(self.0.len()).map_err(error)?.write_le(&mut writer)?; + // Serialize the signed proposals. + for (address, (round, batch_id, signature)) in &self.0 { + // Write the address. + address.write_le(&mut writer)?; + // Write the round. + round.write_le(&mut writer)?; + // Write the batch id. + batch_id.write_le(&mut writer)?; + // Write the signature. + signature.write_le(&mut writer)?; + } + + Ok(()) + } +} + +impl FromBytes for SignedProposals { + fn read_le(mut reader: R) -> IoResult { + // Read the number of signed proposals. + let num_signed_proposals = u32::read_le(&mut reader)?; + // Deserialize the signed proposals. + let mut signed_proposals = HashMap::default(); + for _ in 0..num_signed_proposals { + // Read the address. + let address = FromBytes::read_le(&mut reader)?; + // Read the round. + let round = FromBytes::read_le(&mut reader)?; + // Read the batch id. + let batch_id = FromBytes::read_le(&mut reader)?; + // Read the signature. + let signature = FromBytes::read_le(&mut reader)?; + // Insert the signed proposal. 
+ signed_proposals.insert(address, (round, batch_id, signature)); + } + + Ok(Self(signed_proposals)) + } +} + +impl Deref for SignedProposals { + type Target = HashMap, (u64, Field, Signature)>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Default for SignedProposals { + /// Initializes a new instance of the signed proposals. + fn default() -> Self { + Self(Default::default()) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use snarkvm::{ + console::{account::PrivateKey, network::MainnetV0}, + utilities::{TestRng, Uniform}, + }; + + use rand::Rng; + + type CurrentNetwork = MainnetV0; + + const ITERATIONS: usize = 100; + + pub(crate) fn sample_signed_proposals( + signer: &PrivateKey, + rng: &mut TestRng, + ) -> SignedProposals { + let mut signed_proposals: HashMap<_, _> = Default::default(); + for _ in 0..CurrentNetwork::MAX_CERTIFICATES { + let private_key = PrivateKey::::new(rng).unwrap(); + let address = Address::try_from(&private_key).unwrap(); + + // Add the signed proposal to the map. + let round = rng.gen(); + let batch_id = Field::rand(rng); + let signature = signer.sign(&[batch_id], rng).unwrap(); + signed_proposals.insert(address, (round, batch_id, signature)); + } + + SignedProposals(signed_proposals) + } + + #[test] + fn test_bytes() { + let rng = &mut TestRng::default(); + let signer_private_key = PrivateKey::::new(rng).unwrap(); + + for _ in 0..ITERATIONS { + let expected = sample_signed_proposals(&signer_private_key, rng); + // Check the byte representation. + let expected_bytes = expected.to_bytes_le().unwrap(); + assert_eq!(expected, SignedProposals::read_le(&expected_bytes[..]).unwrap()); + } + } + + #[test] + fn test_is_valid() { + let rng = &mut TestRng::default(); + + for _ in 0..ITERATIONS { + let signer_private_key = PrivateKey::::new(rng).unwrap(); + let signer_address = Address::try_from(&signer_private_key).unwrap(); + let signed_proposals = sample_signed_proposals(&signer_private_key, rng); + // Ensure that the signed proposals are valid. + assert!(signed_proposals.is_valid(signer_address)); + } + } +} diff --git a/node/bft/src/helpers/storage.rs b/node/bft/src/helpers/storage.rs index 1f7b3ce892..bda8bce766 100644 --- a/node/bft/src/helpers/storage.rs +++ b/node/bft/src/helpers/storage.rs @@ -17,15 +17,14 @@ use snarkos_node_bft_ledger_service::LedgerService; use snarkos_node_bft_storage_service::StorageService; use snarkvm::{ ledger::{ - block::Block, + block::{Block, Transaction}, narwhal::{BatchCertificate, BatchHeader, Transmission, TransmissionID}, }, - prelude::{anyhow, bail, cfg_iter, ensure, Address, Field, Network, Result}, + prelude::{anyhow, bail, ensure, Address, Field, Network, Result}, }; use indexmap::{map::Entry, IndexMap, IndexSet}; use parking_lot::RwLock; -use rayon::prelude::*; use std::{ collections::{HashMap, HashSet}, sync::{ @@ -115,6 +114,8 @@ impl Storage { })); // Update the storage to the current round. storage.update_current_round(current_round); + // Perform GC on the current round. + storage.garbage_collect_certificates(current_round); // Return the storage. storage } @@ -204,7 +205,10 @@ impl Storage { fn update_current_round(&self, next_round: u64) { // Update the current round. self.current_round.store(next_round, Ordering::SeqCst); + } + /// Update the storage by performing garbage collection based on the next round. + pub(crate) fn garbage_collect_certificates(&self, next_round: u64) { // Fetch the current GC round. let current_gc_round = self.gc_round(); // Compute the next GC round.
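// The map layout (author -> (round, batch ID, signature)) is enough to build an
// equivocation guard. A sketch of how a validator might consult it before
// signing; the guard policy itself is an assumption, not defined in this file.
fn already_signed_conflicting<N: Network>(
    signed_proposals: &SignedProposals<N>,
    author: Address<N>,
    round: u64,
    batch_id: Field<N>,
) -> bool {
    // `SignedProposals` derefs to the underlying `HashMap`.
    match signed_proposals.get(&author) {
        // Same author and round: conflicting unless the batch ID is identical.
        Some((prev_round, prev_batch_id, _)) => *prev_round == round && *prev_batch_id != batch_id,
        // Nothing recorded for this author yet.
        None => false,
    }
}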
@@ -319,6 +323,35 @@ impl Storage { } } + /// Returns the certificates that have not yet been included in the ledger. + /// Note that the order of this set is by round and then insertion. + pub(crate) fn get_pending_certificates(&self) -> IndexSet> { + let mut pending_certificates = IndexSet::new(); + + // Obtain the read locks. + let rounds = self.rounds.read(); + let certificates = self.certificates.read(); + + // Iterate over the rounds. + for (_, certificates_for_round) in rounds.clone().sorted_by(|a, _, b, _| a.cmp(b)) { + // Iterate over the certificates for the round. + for (certificate_id, _, _) in certificates_for_round { + // Skip the certificate if it already exists in the ledger. + if self.ledger.contains_certificate(&certificate_id).unwrap_or(false) { + continue; + } + + // Add the certificate to the pending certificates. + match certificates.get(&certificate_id).cloned() { + Some(certificate) => pending_certificates.insert(certificate), + None => continue, + }; + } + } + + pending_certificates + } + /// Checks the given `batch_header` for validity, returning the missing transmissions from storage. /// /// This method ensures the following invariants: @@ -335,6 +368,7 @@ impl Storage { &self, batch_header: &BatchHeader, transmissions: HashMap, Transmission>, + aborted_transmissions: HashSet>, ) -> Result, Transmission>> { // Retrieve the round. let round = batch_header.round(); @@ -348,12 +382,12 @@ impl Storage { bail!("Batch for round {round} already exists in storage {gc_log}") } - // Retrieve the previous committee for the batch round. - let Ok(previous_committee) = self.ledger.get_previous_committee_for_round(round) else { - bail!("Storage failed to retrieve the committee for round {round} {gc_log}") + // Retrieve the committee lookback for the batch round. + let Ok(committee_lookback) = self.ledger.get_committee_lookback_for_round(round) else { + bail!("Storage failed to retrieve the committee lookback for round {round} {gc_log}") }; // Ensure the author is in the committee. - if !previous_committee.is_committee_member(batch_header.author()) { + if !committee_lookback.is_committee_member(batch_header.author()) { bail!("Author {} is not in the committee for round {round} {gc_log}", batch_header.author()) } @@ -363,15 +397,15 @@ impl Storage { // Retrieve the missing transmissions in storage from the given transmissions. let missing_transmissions = self .transmissions - .find_missing_transmissions(batch_header, transmissions) + .find_missing_transmissions(batch_header, transmissions, aborted_transmissions) .map_err(|e| anyhow!("{e} for round {round} {gc_log}"))?; // Compute the previous round. let previous_round = round.saturating_sub(1); // Check if the previous round is within range of the GC round. if previous_round > gc_round { - // Retrieve the committee for the previous round. - let Ok(previous_committee) = self.ledger.get_previous_committee_for_round(previous_round) else { + // Retrieve the committee lookback for the previous round. + let Ok(previous_committee_lookback) = self.ledger.get_committee_lookback_for_round(previous_round) else { bail!("Missing committee for the previous round {previous_round} in storage {gc_log}") }; // Ensure the previous round certificates exists in storage. @@ -379,7 +413,7 @@ impl Storage { bail!("Missing certificates for the previous round {previous_round} in storage {gc_log}") } // Ensure the number of previous certificate IDs is at or below the number of committee members. 
- if batch_header.previous_certificate_ids().len() > previous_committee.num_members() { + if batch_header.previous_certificate_ids().len() > previous_committee_lookback.num_members() { bail!("Too many previous certificates for round {round} {gc_log}") } // Initialize a set of the previous authors. @@ -405,7 +439,7 @@ impl Storage { previous_authors.insert(previous_certificate.author()); } // Ensure the previous certificates have reached the quorum threshold. - if !previous_committee.is_quorum_threshold_reached(&previous_authors) { + if !previous_committee_lookback.is_quorum_threshold_reached(&previous_authors) { bail!("Previous certificates for a batch in round {round} did not reach quorum threshold {gc_log}") } } @@ -431,6 +465,7 @@ impl Storage { &self, certificate: &BatchCertificate, transmissions: HashMap, Transmission>, + aborted_transmissions: HashSet>, ) -> Result, Transmission>> { // Retrieve the round. let round = certificate.round(); @@ -450,13 +485,14 @@ impl Storage { } // Ensure the batch header is well-formed. - let missing_transmissions = self.check_batch_header(certificate.batch_header(), transmissions)?; + let missing_transmissions = + self.check_batch_header(certificate.batch_header(), transmissions, aborted_transmissions)?; // Check the timestamp for liveness. check_timestamp_for_liveness(certificate.timestamp())?; - // Retrieve the previous committee for the batch round. - let Ok(previous_committee) = self.ledger.get_previous_committee_for_round(round) else { + // Retrieve the committee lookback for the batch round. + let Ok(committee_lookback) = self.ledger.get_committee_lookback_for_round(round) else { bail!("Storage failed to retrieve the committee for round {round} {gc_log}") }; @@ -470,7 +506,7 @@ impl Storage { // Retrieve the signer. let signer = signature.to_address(); // Ensure the signer is in the committee. - if !previous_committee.is_committee_member(signer) { + if !committee_lookback.is_committee_member(signer) { bail!("Signer {signer} is not in the committee for round {round} {gc_log}") } // Append the signer. @@ -478,7 +514,7 @@ impl Storage { } // Ensure the signatures have reached the quorum threshold. - if !previous_committee.is_quorum_threshold_reached(&signers) { + if !committee_lookback.is_quorum_threshold_reached(&signers) { bail!("Signatures for a batch in round {round} did not reach quorum threshold {gc_log}") } Ok(missing_transmissions) @@ -499,13 +535,15 @@ impl Storage { &self, certificate: BatchCertificate, transmissions: HashMap, Transmission>, + aborted_transmissions: HashSet>, ) -> Result<()> { // Ensure the certificate round is above the GC round. ensure!(certificate.round() > self.gc_round(), "Certificate round is at or below the GC round"); // Ensure the certificate and its transmissions are valid. - let missing_transmissions = self.check_certificate(&certificate, transmissions)?; + let missing_transmissions = + self.check_certificate(&certificate, transmissions, aborted_transmissions.clone())?; // Insert the certificate into storage. - self.insert_certificate_atomic(certificate, missing_transmissions); + self.insert_certificate_atomic(certificate, aborted_transmissions, missing_transmissions); Ok(()) } @@ -517,6 +555,7 @@ impl Storage { fn insert_certificate_atomic( &self, certificate: BatchCertificate, + aborted_transmission_ids: HashSet>, missing_transmissions: HashMap, Transmission>, ) { // Retrieve the round. @@ -537,7 +576,12 @@ impl Storage { // Insert the batch ID. 
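// Both the previous-certificate check and the signature check above reduce to
// the same stake-weighted quorum test. A sketch of that test, assuming quorum
// means strictly more than two-thirds of the total stake (the exact snarkVM
// arithmetic may differ); addresses are modeled as plain u64 IDs.
fn is_quorum_reached(stakes: &[(u64, u64)], signers: &std::collections::HashSet<u64>) -> bool {
    let total: u128 = stakes.iter().map(|(_, stake)| u128::from(*stake)).sum();
    let signed: u128 =
        stakes.iter().filter(|(id, _)| signers.contains(id)).map(|(_, stake)| u128::from(*stake)).sum();
    // Strictly more than 2/3 of total stake, compared without division.
    3 * signed > 2 * total
}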
self.batch_ids.write().insert(batch_id, round); // Insert the certificate ID for each of the transmissions into storage. - self.transmissions.insert_transmissions(certificate_id, transmission_ids, missing_transmissions); + self.transmissions.insert_transmissions( + certificate_id, + transmission_ids, + aborted_transmission_ids, + missing_transmissions, + ); } /// Removes the given `certificate ID` from storage. @@ -610,7 +654,12 @@ impl Storage { } /// Syncs the batch certificate with the block. - pub(crate) fn sync_certificate_with_block(&self, block: &Block, certificate: &BatchCertificate) { + pub(crate) fn sync_certificate_with_block( + &self, + block: &Block, + certificate: BatchCertificate, + unconfirmed_transactions: &HashMap>, + ) { // Skip if the certificate round is below the GC round. if certificate.round() <= self.gc_round() { return; @@ -622,10 +671,12 @@ impl Storage { // Retrieve the transmissions for the certificate. let mut missing_transmissions = HashMap::new(); - // Reconstruct the unconfirmed transactions. - let mut unconfirmed_transactions = cfg_iter!(block.transactions()) - .filter_map(|tx| tx.to_unconfirmed_transaction().map(|unconfirmed| (unconfirmed.id(), unconfirmed)).ok()) - .collect::>(); + // Retrieve the aborted transmissions for the certificate. + let mut aborted_transmissions = HashSet::new(); + + // Track the block's aborted solutions and transactions. + let aborted_solutions: IndexSet<_> = block.aborted_solution_ids().iter().collect(); + let aborted_transactions: IndexSet<_> = block.aborted_transaction_ids().iter().collect(); // Iterate over the transmission IDs. for transmission_id in certificate.transmission_ids() { @@ -640,17 +691,26 @@ impl Storage { // Retrieve the transmission. match transmission_id { TransmissionID::Ratification => (), - TransmissionID::Solution(puzzle_commitment) => { + TransmissionID::Solution(solution_id) => { // Retrieve the solution. - match block.get_solution(puzzle_commitment) { + match block.get_solution(solution_id) { // Insert the solution. Some(solution) => missing_transmissions.insert(*transmission_id, (*solution).into()), // Otherwise, try to load the solution from the ledger. - None => match self.ledger.get_solution(puzzle_commitment) { + None => match self.ledger.get_solution(solution_id) { // Insert the solution. Ok(solution) => missing_transmissions.insert(*transmission_id, solution.into()), + // Check if the solution is in the aborted solutions. Err(_) => { - error!("Missing solution {puzzle_commitment} in block {}", block.height()); + // Insert the aborted solution if it exists in the block or ledger. + match aborted_solutions.contains(solution_id) + || self.ledger.contains_transmission(transmission_id).unwrap_or(false) + { + true => { + aborted_transmissions.insert(*transmission_id); + } + false => error!("Missing solution {solution_id} in block {}", block.height()), + } continue; } }, @@ -658,15 +718,24 @@ impl Storage { } TransmissionID::Transaction(transaction_id) => { // Retrieve the transaction. - match unconfirmed_transactions.remove(transaction_id) { + match unconfirmed_transactions.get(transaction_id) { // Insert the transaction. - Some(transaction) => missing_transmissions.insert(*transmission_id, transaction.into()), + Some(transaction) => missing_transmissions.insert(*transmission_id, transaction.clone().into()), // Otherwise, try to load the unconfirmed transaction from the ledger. None => match self.ledger.get_unconfirmed_transaction(*transaction_id) { // Insert the transaction. 
Ok(transaction) => missing_transmissions.insert(*transmission_id, transaction.into()), + // Check if the transaction is in the aborted transactions. Err(_) => { - warn!("Missing transaction {transaction_id} in block {}", block.height()); + // Insert the aborted transaction if it exists in the block or ledger. + match aborted_transactions.contains(transaction_id) + || self.ledger.contains_transmission(transmission_id).unwrap_or(false) + { + true => { + aborted_transmissions.insert(*transmission_id); + } + false => warn!("Missing transaction {transaction_id} in block {}", block.height()), + } continue; } }, @@ -681,7 +750,7 @@ impl Storage { certificate.round(), certificate.transmission_ids().len() ); - if let Err(error) = self.insert_certificate(certificate.clone(), missing_transmissions) { + if let Err(error) = self.insert_certificate(certificate, missing_transmissions, aborted_transmissions) { error!("Failed to insert certificate '{certificate_id}' from block {} - {error}", block.height()); } } @@ -745,12 +814,17 @@ impl Storage { .map(|id| (*id, Transmission::Transaction(snarkvm::ledger::narwhal::Data::Buffer(bytes::Bytes::new())))) .collect::>(); // Insert the certificate ID for each of the transmissions into storage. - self.transmissions.insert_transmissions(certificate_id, transmission_ids, missing_transmissions); + self.transmissions.insert_transmissions( + certificate_id, + transmission_ids, + Default::default(), + missing_transmissions, + ); } } #[cfg(test)] -mod tests { +pub(crate) mod tests { use super::*; use snarkos_node_bft_ledger_service::MockLedgerService; use snarkos_node_bft_storage_service::BFTMemoryService; @@ -762,7 +836,7 @@ mod tests { use ::bytes::Bytes; use indexmap::indexset; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; /// Asserts that the storage matches the expected layout. pub fn assert_storage( @@ -796,7 +870,7 @@ mod tests { } /// Samples the random transmissions, returning the missing transmissions and the transmissions. - fn sample_transmissions( + pub(crate) fn sample_transmissions( certificate: &BatchCertificate, rng: &mut TestRng, ) -> ( @@ -854,7 +928,7 @@ mod tests { let (missing_transmissions, transmissions) = sample_transmissions(&certificate, rng); // Insert the certificate. - storage.insert_certificate_atomic(certificate.clone(), missing_transmissions); + storage.insert_certificate_atomic(certificate.clone(), Default::default(), missing_transmissions); // Ensure the certificate exists in storage. assert!(storage.contains_certificate(certificate_id)); // Ensure the certificate is stored in the correct round. @@ -926,21 +1000,21 @@ mod tests { let (missing_transmissions, transmissions) = sample_transmissions(&certificate, rng); // Insert the certificate. - storage.insert_certificate_atomic(certificate.clone(), missing_transmissions.clone()); + storage.insert_certificate_atomic(certificate.clone(), Default::default(), missing_transmissions.clone()); // Ensure the certificate exists in storage. assert!(storage.contains_certificate(certificate_id)); // Check that the underlying storage representation is correct. assert_storage(&storage, &rounds, &certificates, &batch_ids, &transmissions); // Insert the certificate again - without any missing transmissions. - storage.insert_certificate_atomic(certificate.clone(), Default::default()); + storage.insert_certificate_atomic(certificate.clone(), Default::default(), Default::default()); // Ensure the certificate exists in storage. 
assert!(storage.contains_certificate(certificate_id)); // Check that the underlying storage representation remains unchanged. assert_storage(&storage, &rounds, &certificates, &batch_ids, &transmissions); // Insert the certificate again - with all of the original missing transmissions. - storage.insert_certificate_atomic(certificate, missing_transmissions); + storage.insert_certificate_atomic(certificate, Default::default(), missing_transmissions); // Ensure the certificate exists in storage. assert!(storage.contains_certificate(certificate_id)); // Check that the underlying storage representation remains unchanged. @@ -951,17 +1025,14 @@ mod tests { #[cfg(test)] pub mod prop_tests { use super::*; - use crate::{ - helpers::{now, storage::tests::assert_storage}, - MAX_GC_ROUNDS, - }; + use crate::helpers::{now, storage::tests::assert_storage}; use snarkos_node_bft_ledger_service::MockLedgerService; use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ ledger::{ - coinbase::PuzzleCommitment, committee::prop_tests::{CommitteeContext, ValidatorSet}, - narwhal::Data, + narwhal::{BatchHeader, Data}, + puzzle::SolutionID, }, prelude::{Signature, Uniform}, }; @@ -979,14 +1050,14 @@ pub mod prop_tests { use std::fmt::Debug; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; impl Arbitrary for Storage { type Parameters = CommitteeContext; type Strategy = BoxedStrategy>; fn arbitrary() -> Self::Strategy { - (any::(), 0..MAX_GC_ROUNDS) + (any::(), 0..BatchHeader::::MAX_GC_ROUNDS as u64) .prop_map(|(CommitteeContext(committee, _), gc_rounds)| { let ledger = Arc::new(MockLedgerService::new(committee)); Storage::::new(ledger, Arc::new(BFTMemoryService::new()), gc_rounds) @@ -995,7 +1066,7 @@ pub mod prop_tests { } fn arbitrary_with(context: Self::Parameters) -> Self::Strategy { - (Just(context), 0..MAX_GC_ROUNDS) + (Just(context), 0..BatchHeader::::MAX_GC_ROUNDS as u64) .prop_map(|(CommitteeContext(committee, _), gc_rounds)| { let ledger = Arc::new(MockLedgerService::new(committee)); Storage::::new(ledger, Arc::new(BFTMemoryService::new()), gc_rounds) @@ -1070,8 +1141,8 @@ pub mod prop_tests { .boxed() } - pub fn any_puzzle_commitment() -> BoxedStrategy> { - Just(0).prop_perturb(|_, rng| PuzzleCommitment::from_g1_affine(CryptoTestRng(rng).gen())).boxed() + pub fn any_solution_id() -> BoxedStrategy> { + Just(0).prop_perturb(|_, rng| CryptoTestRng(rng).gen::().into()).boxed() } pub fn any_transaction_id() -> BoxedStrategy<::TransactionID> { @@ -1085,7 +1156,7 @@ pub mod prop_tests { pub fn any_transmission_id() -> BoxedStrategy> { prop_oneof![ any_transaction_id().prop_map(TransmissionID::Transaction), - any_puzzle_commitment().prop_map(TransmissionID::Solution), + any_solution_id().prop_map(TransmissionID::Solution), ] .boxed() } @@ -1111,6 +1182,7 @@ pub mod prop_tests { selector: Selector, ) { let CommitteeContext(committee, ValidatorSet(validators)) = context; + let committee_id = committee.id(); // Initialize the storage. let ledger = Arc::new(MockLedgerService::new(committee)); @@ -1132,9 +1204,9 @@ pub mod prop_tests { &signer.private_key, 0, now(), + committee_id, transmission_map.keys().cloned().collect(), Default::default(), - Default::default(), &mut rng, ) .unwrap(); @@ -1174,21 +1246,21 @@ pub mod prop_tests { // Insert the certificate. 
let missing_transmissions: HashMap, Transmission> = transmission_map.into_iter().collect(); - storage.insert_certificate_atomic(certificate.clone(), missing_transmissions.clone()); + storage.insert_certificate_atomic(certificate.clone(), Default::default(), missing_transmissions.clone()); // Ensure the certificate exists in storage. assert!(storage.contains_certificate(certificate_id)); // Check that the underlying storage representation is correct. assert_storage(&storage, &rounds, &certificates, &batch_ids, &internal_transmissions); // Insert the certificate again - without any missing transmissions. - storage.insert_certificate_atomic(certificate.clone(), Default::default()); + storage.insert_certificate_atomic(certificate.clone(), Default::default(), Default::default()); // Ensure the certificate exists in storage. assert!(storage.contains_certificate(certificate_id)); // Check that the underlying storage representation remains unchanged. assert_storage(&storage, &rounds, &certificates, &batch_ids, &internal_transmissions); // Insert the certificate again - with all of the original missing transmissions. - storage.insert_certificate_atomic(certificate, missing_transmissions); + storage.insert_certificate_atomic(certificate, Default::default(), missing_transmissions); // Ensure the certificate exists in storage. assert!(storage.contains_certificate(certificate_id)); // Check that the underlying storage representation remains unchanged. diff --git a/node/bft/src/helpers/timestamp.rs b/node/bft/src/helpers/timestamp.rs index 0c546bd266..2b361fa3cc 100644 --- a/node/bft/src/helpers/timestamp.rs +++ b/node/bft/src/helpers/timestamp.rs @@ -28,11 +28,6 @@ pub fn check_timestamp_for_liveness(timestamp: i64) -> Result<()> { if timestamp > (now() + MAX_TIMESTAMP_DELTA_IN_SECS) { bail!("Timestamp {timestamp} is too far in the future") } - // TODO (howardwu): Ensure the timestamp is after the previous timestamp. (Needs Bullshark committee) - // // Ensure the timestamp is after the previous timestamp. - // if timestamp <= committee.previous_timestamp() { - // bail!("Timestamp {timestamp} for the proposed batch must be after the previous round timestamp") - // } Ok(()) } diff --git a/node/bft/src/lib.rs b/node/bft/src/lib.rs index ce51c986b4..ff2fe665a5 100644 --- a/node/bft/src/lib.rs +++ b/node/bft/src/lib.rs @@ -13,6 +13,7 @@ // limitations under the License. #![forbid(unsafe_code)] +#![allow(clippy::blocks_in_conditions)] #![allow(clippy::type_complexity)] #[macro_use] @@ -48,21 +49,20 @@ pub const MEMORY_POOL_PORT: u16 = 5000; // port /// The maximum number of milliseconds to wait before proposing a batch. pub const MAX_BATCH_DELAY_IN_MS: u64 = 2500; // ms -/// The maximum number of rounds to store before garbage collecting. -pub const MAX_GC_ROUNDS: u64 = 50; // rounds +/// The minimum number of seconds to wait before proposing a batch. +pub const MIN_BATCH_DELAY_IN_SECS: u64 = 1; // seconds +/// The maximum number of milliseconds to wait before timing out on a fetch. +pub const MAX_FETCH_TIMEOUT_IN_MS: u64 = 3 * MAX_BATCH_DELAY_IN_MS; // ms /// The maximum number of seconds allowed for the leader to send their certificate. pub const MAX_LEADER_CERTIFICATE_DELAY_IN_SECS: i64 = 2 * MAX_BATCH_DELAY_IN_MS as i64 / 1000; // seconds /// The maximum number of seconds before the timestamp is considered expired. pub const MAX_TIMESTAMP_DELTA_IN_SECS: i64 = 10; // seconds -/// The maximum number of transmissions allowed in a batch. 
-pub const MAX_TRANSMISSIONS_PER_BATCH: usize = 250; // transmissions -/// The maximum number of transmissions allowed in a worker ping. -pub const MAX_TRANSMISSIONS_PER_WORKER_PING: usize = MAX_TRANSMISSIONS_PER_BATCH / 10; // transmissions /// The maximum number of workers that can be spawned. -pub const MAX_WORKERS: u8 = 1; // workers +pub const MAX_WORKERS: u8 = 1; // worker(s) /// The frequency at which each primary broadcasts a ping to every other node. -pub const PRIMARY_PING_IN_MS: u64 = 4 * MAX_BATCH_DELAY_IN_MS; // ms +/// Note: If this is updated, be sure to update `MAX_BLOCKS_BEHIND` to correspond properly. +pub const PRIMARY_PING_IN_MS: u64 = 2 * MAX_BATCH_DELAY_IN_MS; // ms /// The frequency at which each worker broadcasts a ping to every other node. pub const WORKER_PING_IN_MS: u64 = 4 * MAX_BATCH_DELAY_IN_MS; // ms @@ -76,15 +76,3 @@ macro_rules! spawn_blocking { } }; } - -#[cfg(test)] -mod tests { - use super::*; - - type CurrentNetwork = snarkvm::console::network::Testnet3; - - #[test] - fn test_max_gc_rounds() { - assert_eq!(MAX_GC_ROUNDS as usize, snarkvm::ledger::narwhal::Subdag::::MAX_ROUNDS); - } -} diff --git a/node/bft/src/primary.rs b/node/bft/src/primary.rs index f8ca3391ec..05069d2aa4 100644 --- a/node/bft/src/primary.rs +++ b/node/bft/src/primary.rs @@ -25,6 +25,8 @@ use crate::{ PrimaryReceiver, PrimarySender, Proposal, + ProposalCache, + SignedProposals, Storage, }, spawn_blocking, @@ -33,24 +35,24 @@ use crate::{ Transport, Worker, MAX_BATCH_DELAY_IN_MS, - MAX_TRANSMISSIONS_PER_BATCH, MAX_WORKERS, + MIN_BATCH_DELAY_IN_SECS, PRIMARY_PING_IN_MS, WORKER_PING_IN_MS, }; use snarkos_account::Account; use snarkos_node_bft_events::PrimaryPing; use snarkos_node_bft_ledger_service::LedgerService; +use snarkos_node_sync::DUMMY_SELF_IP; use snarkvm::{ console::{ - account::Signature, prelude::*, types::{Address, Field}, }, ledger::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, narwhal::{BatchCertificate, BatchHeader, Data, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, prelude::committee::Committee, }; @@ -59,6 +61,7 @@ use colored::Colorize; use futures::stream::{FuturesUnordered, StreamExt}; use indexmap::{IndexMap, IndexSet}; use parking_lot::{Mutex, RwLock}; +use rayon::prelude::*; use std::{ collections::{HashMap, HashSet}, future::Future, @@ -90,8 +93,10 @@ pub struct Primary { bft_sender: Arc>>, /// The batch proposal, if the primary is currently proposing a batch. proposed_batch: Arc>, - /// The recently-signed batch proposals (a map from the address to the round, batch ID, and signature). - signed_proposals: Arc, (u64, Field, Signature)>>>, + /// The timestamp of the most recent proposed batch. + latest_proposed_batch_timestamp: Arc>, + /// The recently-signed batch proposals. + signed_proposals: Arc>>, /// The spawned handles. handles: Arc>>>, /// The lock for propose_batch. @@ -99,6 +104,9 @@ pub struct Primary { } impl Primary { + /// The maximum number of unconfirmed transmissions to send to the primary. + pub const MAX_TRANSMISSIONS_TOLERANCE: usize = BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH * 2; + /// Initializes a new primary instance. pub fn new( account: Account, @@ -109,9 +117,10 @@ impl Primary { dev: Option, ) -> Result { // Initialize the gateway. - let gateway = Gateway::new(account, ledger.clone(), ip, trusted_validators, dev)?; + let gateway = Gateway::new(account, storage.clone(), ledger.clone(), ip, trusted_validators, dev)?; // Initialize the sync module. 
let sync = Sync::new(gateway.clone(), storage.clone(), ledger.clone()); + // Initialize the primary instance. Ok(Self { sync, @@ -121,12 +130,52 @@ workers: Arc::from(vec![]), bft_sender: Default::default(), proposed_batch: Default::default(), + latest_proposed_batch_timestamp: Default::default(), signed_proposals: Default::default(), handles: Default::default(), propose_lock: Default::default(), }) } + /// Load the proposal cache file and update the Primary state with the stored data. + async fn load_proposal_cache(&self) -> Result<()> { + // Fetch the signed proposals from the file system if it exists. + match ProposalCache::<N>::exists(self.gateway.dev()) { + // If the proposal cache exists, then process the proposal cache. + true => match ProposalCache::<N>::load(self.gateway.account().address(), self.gateway.dev()) { + Ok(proposal_cache) => { + // Extract the proposal and signed proposals. + let (latest_certificate_round, proposed_batch, signed_proposals, pending_certificates) = + proposal_cache.into(); + + // Write the proposed batch. + *self.proposed_batch.write() = proposed_batch; + // Write the signed proposals. + *self.signed_proposals.write() = signed_proposals; + // Write the propose lock. + *self.propose_lock.lock().await = latest_certificate_round; + + // Update the storage with the pending certificates. + for certificate in pending_certificates { + let batch_id = certificate.batch_id(); + // We use a dummy IP because the node should not need to request from any peers. + // The storage should have stored all the transmissions. If not, we simply + // skip the certificate. + if let Err(err) = self.sync_with_certificate_from_peer(DUMMY_SELF_IP, certificate).await { + warn!("Failed to load stored certificate {} from proposal cache - {err}", fmt_id(batch_id)); + } + } + Ok(()) + } + Err(err) => { + bail!("Failed to read the signed proposals from the file system - {err}."); + } + }, + // If the proposal cache does not exist, then return early. + false => Ok(()), + } + } + /// Run the primary instance. pub async fn run( &mut self, @@ -170,8 +219,12 @@ // First, initialize the sync channels. let (sync_sender, sync_receiver) = init_sync_channels(); - // Next, initialize the sync module. - self.sync.run(bft_sender, sync_receiver).await?; + // Next, initialize the sync module and sync the storage from ledger. + self.sync.initialize(bft_sender).await?; + // Next, load and process the proposal cache before running the sync module. + self.load_proposal_cache().await?; + // Next, run the sync module. + self.sync.run(sync_receiver).await?; // Next, initialize the gateway. self.gateway.run(primary_sender, worker_senders, Some(sync_sender)).await; // Lastly, start the primary handlers. @@ -186,6 +239,11 @@ self.storage.current_round() } + /// Returns `true` if the primary is synced. + pub fn is_synced(&self) -> bool { + self.sync.is_synced() + } + /// Returns the gateway. pub const fn gateway(&self) -> &Gateway<N> { &self.gateway @@ -240,23 +298,23 @@ } impl Primary { - /// Returns the unconfirmed transmission IDs. - pub fn unconfirmed_transmission_ids(&self) -> impl '_ + Iterator<Item = TransmissionID<N>> { + /// Returns the worker transmission IDs. + pub fn worker_transmission_ids(&self) -> impl '_ + Iterator<Item = TransmissionID<N>> { self.workers.iter().flat_map(|worker| worker.transmission_ids()) } - /// Returns the unconfirmed transmissions. + /// Returns the worker transmissions.
+ pub fn worker_transmissions(&self) -> impl '_ + Iterator<Item = (TransmissionID<N>, Transmission<N>)> { self.workers.iter().flat_map(|worker| worker.transmissions()) } - /// Returns the unconfirmed solutions. - pub fn unconfirmed_solutions(&self) -> impl '_ + Iterator<Item = (PuzzleCommitment<N>, Data<ProverSolution<N>>)> { + /// Returns the worker solutions. + pub fn worker_solutions(&self) -> impl '_ + Iterator<Item = (SolutionID<N>, Data<Solution<N>>)> { self.workers.iter().flat_map(|worker| worker.solutions()) } - /// Returns the unconfirmed transactions. - pub fn unconfirmed_transactions(&self) -> impl '_ + Iterator<Item = (N::TransactionID, Data<Transaction<N>>)> { + /// Returns the worker transactions. + pub fn worker_transactions(&self) -> impl '_ + Iterator<Item = (N::TransactionID, Data<Transaction<N>>)> { self.workers.iter().flat_map(|worker| worker.transactions()) } } @@ -279,14 +337,39 @@ return Ok(()); } + // Retrieve the current round. + let round = self.current_round(); + // Compute the previous round. + let previous_round = round.saturating_sub(1); + + // If the current storage round is below the latest proposal round, then return early. + if round < *lock_guard { + warn!("Cannot propose a batch for round {round} - the latest proposal cache round is {}", *lock_guard); + return Ok(()); + } + // If there is a batch being proposed already, // rebroadcast the batch header to the non-signers, and return early. if let Some(proposal) = self.proposed_batch.read().as_ref() { + // Ensure that the storage is caught up to the proposal before proceeding to rebroadcast this. + if round < proposal.round() + || proposal + .batch_header() + .previous_certificate_ids() + .iter() + .any(|id| !self.storage.contains_certificate(*id)) + { + warn!( + "Cannot propose a batch for round {} - the current storage (round {round}) is not caught up to the proposed batch.", + proposal.round(), + ); + return Ok(()); + } // Construct the event. // TODO(ljedrz): the BatchHeader should be serialized only once in advance before being sent to non-signers. let event = Event::BatchPropose(proposal.batch_header().clone().into()); // Iterate through the non-signers. - for address in proposal.nonsigners(&self.ledger.get_previous_committee_for_round(proposal.round())?) { + for address in proposal.nonsigners(&self.ledger.get_committee_lookback_for_round(proposal.round())?) { // Resolve the address to the peer IP. match self.gateway.resolver().get_peer_ip_for_address(address) { // Resend the batch proposal to the validator for signing. @@ -307,12 +390,15 @@ return Ok(()); } - // Retrieve the current round. - let round = self.current_round(); - #[cfg(feature = "metrics")] metrics::gauge(metrics::bft::PROPOSAL_ROUND, round as f64); + // Ensure that the primary does not create a new proposal too quickly. + if let Err(e) = self.check_proposal_timestamp(previous_round, self.gateway.account().address(), now()) { + debug!("Primary is safely skipping a batch proposal - {}", format!("{e}").dimmed()); + return Ok(()); + } + // Ensure the primary has not proposed a batch for this round before. if self.storage.contains_certificate_in_round_from(round, self.gateway.account().address()) { // If a BFT sender was provided, attempt to advance the current round. @@ -329,19 +415,20 @@ } } } - bail!("Primary is safely skipping {}", format!("(round {round} was already certified)").dimmed()); + debug!("Primary is safely skipping {}", format!("(round {round} was already certified)").dimmed()); + return Ok(()); } + // Retrieve the committee to check against.
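// --- Editor's aside (illustration, not part of the diff). This file swaps
// `get_previous_committee_for_round` for `get_committee_lookback_for_round`
// throughout: the committee is resolved from an earlier, already-settled round,
// so every validator derives the same membership even while it changes. The
// lookback distance below is an assumed placeholder, not the snarkVM constant:
const COMMITTEE_LOOKBACK_RANGE: u64 = 100; // assumption for illustration

fn committee_lookback_round(round: u64) -> u64 {
    // Clamp at round 0 near genesis rather than underflowing.
    round.saturating_sub(COMMITTEE_LOOKBACK_RANGE)
}
// --- End of aside.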
+ let committee_lookback = self.ledger.get_committee_lookback_for_round(round)?; // Check if the primary is connected to enough validators to reach quorum threshold. { - // Retrieve the committee to check against. - let committee = self.ledger.get_previous_committee_for_round(round)?; // Retrieve the connected validator addresses. let mut connected_validators = self.gateway.connected_addresses(); // Append the primary to the set. connected_validators.insert(self.gateway.account().address()); // If quorum threshold is not reached, return early. - if !committee.is_quorum_threshold_reached(&connected_validators) { + if !committee_lookback.is_quorum_threshold_reached(&connected_validators) { debug!( "Primary is safely skipping a batch proposal {}", "(please connect to more validators)".dimmed() @@ -351,8 +438,6 @@ } } - // Compute the previous round. - let previous_round = round.saturating_sub(1); // Retrieve the previous certificates. let previous_certificates = self.storage.get_certificates_for_round(previous_round); @@ -361,14 +446,14 @@ let mut is_ready = previous_round == 0; // If the previous round is not 0, check if the previous certificates have reached the quorum threshold. if previous_round > 0 { - // Retrieve the previous committee for the round. - let Ok(previous_committee) = self.ledger.get_previous_committee_for_round(previous_round) else { - bail!("Cannot propose a batch for round {round}: the previous committee is not known yet") + // Retrieve the committee lookback for the round. + let Ok(previous_committee_lookback) = self.ledger.get_committee_lookback_for_round(previous_round) else { + bail!("Cannot propose a batch for round {round}: the committee lookback is not known yet") }; // Construct a set over the authors. let authors = previous_certificates.iter().map(BatchCertificate::author).collect(); // Check if the previous certificates have reached the quorum threshold. - if previous_committee.is_quorum_threshold_reached(&authors) { + if previous_committee_lookback.is_quorum_threshold_reached(&authors) { is_ready = true; } } @@ -382,61 +467,75 @@ } // Determine the required number of transmissions per worker. - let num_transmissions_per_worker = MAX_TRANSMISSIONS_PER_BATCH / self.num_workers() as usize; + let num_transmissions_per_worker = BatchHeader::<N>::MAX_TRANSMISSIONS_PER_BATCH / self.num_workers() as usize; // Initialize the map of transmissions. let mut transmissions: IndexMap<_, _> = Default::default(); - // Initialize a tracker for the number of transactions. - let mut num_transactions = 0; // Take the transmissions from the workers. for worker in self.workers.iter() { - for (id, transmission) in worker.drain(num_transmissions_per_worker) { - // Check if the ledger already contains the transmission. - if self.ledger.contains_transmission(&id).unwrap_or(true) { - trace!("Proposing - Skipping transmission '{}' - Already in ledger", fmt_id(id)); - continue; + // Initialize a tracker for included transmissions for the current worker. + let mut num_transmissions_included_for_worker = 0; + // Keep draining the worker until the desired number of transmissions is reached or the worker is empty. + 'outer: while num_transmissions_included_for_worker < num_transmissions_per_worker { + // Determine the number of remaining transmissions for the worker. + let num_remaining_transmissions = + num_transmissions_per_worker.saturating_sub(num_transmissions_included_for_worker); + // Drain the worker.
+ let mut worker_transmissions = worker.drain(num_remaining_transmissions).peekable(); + // If the worker is empty, break early. + if worker_transmissions.peek().is_none() { + break 'outer; } - // Check the transmission is still valid. - match (id, transmission.clone()) { - (TransmissionID::Solution(solution_id), Transmission::Solution(solution)) => { - // Check if the solution is still valid. - if let Err(e) = self.ledger.check_solution_basic(solution_id, solution).await { - trace!("Proposing - Skipping solution '{}' - {e}", fmt_id(solution_id)); - continue; - } + // Iterate through the worker transmissions. 'inner: for (id, transmission) in worker_transmissions { + // Check if the ledger already contains the transmission. + if self.ledger.contains_transmission(&id).unwrap_or(true) { + trace!("Proposing - Skipping transmission '{}' - Already in ledger", fmt_id(id)); + continue 'inner; } - (TransmissionID::Transaction(transaction_id), Transmission::Transaction(transaction)) => { - // Check if the transaction is still valid. - if let Err(e) = self.ledger.check_transaction_basic(transaction_id, transaction).await { - trace!("Proposing - Skipping transaction '{}' - {e}", fmt_id(transaction_id)); - continue; + // Check if the storage already contains the transmission. + // Note: We do not skip if this is the first transmission in the proposal, to ensure that + // the primary does not propose a batch with no transmissions. + if !transmissions.is_empty() && self.storage.contains_transmission(id) { + trace!("Proposing - Skipping transmission '{}' - Already in storage", fmt_id(id)); + continue 'inner; + } + // Check the transmission is still valid. + match (id, transmission.clone()) { + (TransmissionID::Solution(solution_id), Transmission::Solution(solution)) => { + // Check if the solution is still valid. + if let Err(e) = self.ledger.check_solution_basic(solution_id, solution).await { + trace!("Proposing - Skipping solution '{}' - {e}", fmt_id(solution_id)); + continue 'inner; + } } - // Increment the number of transactions. - num_transactions += 1; + (TransmissionID::Transaction(transaction_id), Transmission::Transaction(transaction)) => { + // Check if the transaction is still valid. + if let Err(e) = self.ledger.check_transaction_basic(transaction_id, transaction).await { + trace!("Proposing - Skipping transaction '{}' - {e}", fmt_id(transaction_id)); + continue 'inner; + } + } + // Note: We explicitly forbid including ratifications, + // as the protocol currently does not support ratifications. + (TransmissionID::Ratification, Transmission::Ratification) => continue, + // All other combinations are clearly invalid. + _ => continue 'inner, } - // Note: We explicitly forbid including ratifications, - // as the protocol currently does not support ratifications. - (TransmissionID::Ratification, Transmission::Ratification) => continue, - // All other combinations are clearly invalid. - _ => continue, + // Insert the transmission into the map. + transmissions.insert(id, transmission); + num_transmissions_included_for_worker += 1; } - // Insert the transmission into the map. - transmissions.insert(id, transmission); } } - // If there are no unconfirmed transmissions to propose, return early. - if transmissions.is_empty() { - debug!("Primary is safely skipping a batch proposal {}", "(no unconfirmed transmissions)".dimmed()); - return Ok(()); - } - // If there are no unconfirmed transactions to propose, return early.
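// --- Editor's aside (illustration, not part of the diff). A toy model of the
// rewritten drain loop above: each worker receives an equal share of the batch
// cap and is drained until that share is filled or the worker runs dry, with
// skipped items not counting toward the share. The cap is an assumed value:
const MAX_TRANSMISSIONS_PER_BATCH: usize = 250; // assumption for illustration

fn drain_workers(workers: &mut [Vec<u64>], already_known: impl Fn(u64) -> bool) -> Vec<u64> {
    let budget = MAX_TRANSMISSIONS_PER_BATCH / workers.len().max(1);
    let mut batch = Vec::new();
    for worker in workers.iter_mut() {
        let mut included = 0;
        while included < budget && !worker.is_empty() {
            let remaining = budget - included;
            // Take at most `remaining` items from the front of the queue.
            for id in worker.drain(..remaining.min(worker.len())).collect::<Vec<_>>() {
                // Mirrors the ledger/storage checks: skip anything already known.
                if already_known(id) {
                    continue;
                }
                batch.push(id);
                included += 1;
            }
        }
    }
    batch
}
// --- End of aside.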
- if num_transactions == 0 { - debug!("Primary is safely skipping a batch proposal {}", "(no unconfirmed transactions)".dimmed()); - return Ok(()); - } - // Ditto if the batch had already been proposed. + // Ditto if the batch had already been proposed and not expired. ensure!(round > 0, "Round 0 cannot have transaction batches"); + // Determine the current timestamp. + let current_timestamp = now(); + // Determine if the current proposal is expired. if *lock_guard == round { warn!("Primary is safely skipping a batch proposal - round {round} already proposed"); + // Reinsert the transmissions back into the ready queue for the next proposal. + self.reinsert_transmissions_into_workers(transmissions)?; return Ok(()); } @@ -447,30 +546,37 @@ impl Primary { // Retrieve the private key. let private_key = *self.gateway.account().private_key(); + // Retrieve the committee ID. + let committee_id = committee_lookback.id(); // Prepare the transmission IDs. let transmission_ids = transmissions.keys().copied().collect(); // Prepare the previous batch certificate IDs. let previous_certificate_ids = previous_certificates.into_iter().map(|c| c.id()).collect(); - // Prepare the last election certificate IDs. - let last_election_certificate_ids = match self.bft_sender.get() { - Some(bft_sender) => bft_sender.get_last_election_certificate_ids().await?, - None => Default::default(), - }; - // Sign the batch header. - let batch_header = spawn_blocking!(BatchHeader::new( + // Sign the batch header and construct the proposal. + let (batch_header, proposal) = spawn_blocking!(BatchHeader::new( &private_key, round, - now(), + current_timestamp, + committee_id, transmission_ids, previous_certificate_ids, - last_election_certificate_ids, &mut rand::thread_rng() - ))?; - // Construct the proposal. - let proposal = - Proposal::new(self.ledger.get_previous_committee_for_round(round)?, batch_header.clone(), transmissions)?; + )) + .and_then(|batch_header| { + Proposal::new(committee_lookback, batch_header.clone(), transmissions.clone()) + .map(|proposal| (batch_header, proposal)) + }) + .map_err(|err| { + // On error, reinsert the transmissions and then propagate the error. + if let Err(e) = self.reinsert_transmissions_into_workers(transmissions) { + error!("Failed to reinsert transmissions: {e:?}"); + } + err + })?; // Broadcast the batch to all validators for signing. self.gateway.broadcast(Event::BatchPropose(batch_header.into())); + // Set the timestamp of the latest proposed batch. + *self.latest_proposed_batch_timestamp.write() = proposal.timestamp(); // Set the proposed batch. *self.proposed_batch.write() = Some(proposal); Ok(()) @@ -523,15 +629,29 @@ impl Primary { bail!("Invalid peer - proposed batch from myself ({batch_author})"); } + // Ensure that the batch proposal's committee ID matches the expected committee ID. + let expected_committee_id = self.ledger.get_committee_lookback_for_round(batch_round)?.id(); + if expected_committee_id != batch_header.committee_id() { + // Proceed to disconnect the validator. + self.gateway.disconnect(peer_ip); + bail!( + "Malicious peer - proposed batch has a different committee ID ({expected_committee_id} != {})", + batch_header.committee_id() + ); + } + // Retrieve the cached round and batch ID for this validator. if let Some((signed_round, signed_batch_id, signature)) = self.signed_proposals.read().get(&batch_author).copied() { + // If the signed round is ahead of the peer's batch round, then the validator is malicious. 
+ if signed_round > batch_header.round() { + bail!("Peer ({batch_author}) proposed a batch for a previous round ({})", batch_header.round()); + } + // If the round matches and the batch ID differs, then the validator is malicious. if signed_round == batch_header.round() && signed_batch_id != batch_header.batch_id() { - // Proceed to disconnect the validator. - self.gateway.disconnect(peer_ip); - bail!("Malicious peer - proposed another batch for the same round ({signed_round})"); + bail!("Peer ({batch_author}) proposed another batch for the same round ({signed_round})"); } // If the round and batch ID matches, then skip signing the batch a second time. // Instead, rebroadcast the cached signature to the peer. @@ -550,16 +670,50 @@ impl Primary { } } + // Ensure that the batch header doesn't already exist in storage. + // Note this is already checked in `check_batch_header`, however we can return early here without creating a blocking task. + if self.storage.contains_batch(batch_header.batch_id()) { + debug!( + "Primary is safely skipping a batch proposal from '{peer_ip}' - {}", + format!("batch for round {batch_round} already exists in storage").dimmed() + ); + return Ok(()); + } + + // Compute the previous round. + let previous_round = batch_round.saturating_sub(1); + // Ensure that the peer did not propose a batch too quickly. + if let Err(e) = self.check_proposal_timestamp(previous_round, batch_author, batch_header.timestamp()) { + // Proceed to disconnect the validator. + self.gateway.disconnect(peer_ip); + bail!("Malicious peer - {e} from '{peer_ip}'"); + } + // If the peer is ahead, use the batch header to sync up to the peer. - let transmissions = self.sync_with_batch_header_from_peer(peer_ip, &batch_header).await?; + let mut transmissions = self.sync_with_batch_header_from_peer(peer_ip, &batch_header).await?; + + // Check that the transmission ids match and are not fee transactions. + if let Err(err) = cfg_iter_mut!(transmissions).try_for_each(|(transmission_id, transmission)| { + // If the transmission is not well-formed, then return early. + self.ledger.ensure_transmission_is_well_formed(*transmission_id, transmission) + }) { + debug!("Batch propose from '{peer_ip}' contains an invalid transmission - {err}",); + return Ok(()); + } // Ensure the batch is for the current round. // This method must be called after fetching previous certificates (above), // and prior to checking the batch header (below). - self.ensure_is_signing_round(batch_round)?; + if let Err(e) = self.ensure_is_signing_round(batch_round) { + // If the primary is not signing for the peer's round, then return early. + debug!("{e} from '{peer_ip}'"); + return Ok(()); + } // Ensure the batch header from the peer is valid. - let missing_transmissions = self.storage.check_batch_header(&batch_header, transmissions)?; + let (storage, header) = (self.storage.clone(), batch_header.clone()); + let missing_transmissions = + spawn_blocking!(storage.check_batch_header(&header, transmissions, Default::default()))?; // Inserts the missing transmissions into the workers. self.insert_missing_transmissions_into_workers(peer_ip, missing_transmissions.into_iter())?; @@ -576,7 +730,7 @@ impl Primary { // Note: Due to the need to sync the batch header with the peer, it is possible // for the primary to receive the same 'BatchPropose' event again, whereby only // one instance of this handler should sign the batch. This check guarantees this. 
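// --- Editor's aside (illustration, not part of the diff). The match below
// enforces "one signature per (author, round)". A toy model of the new
// `SignedProposals` wrapper and its three outcomes, with string signatures
// standing in for the real types:
use std::collections::HashMap;

#[derive(Default)]
struct SignedProposalsModel(HashMap<String, (u64, u64, String)>); // author -> (round, batch ID, signature)

impl SignedProposalsModel {
    fn try_sign(&mut self, author: &str, round: u64, batch_id: u64) -> Result<String, String> {
        match self.0.get(author) {
            // Same round, different batch ID: refuse to sign a second proposal.
            Some((r, b, _)) if *r == round && *b != batch_id => {
                Err(format!("{author} already proposed batch {b} for round {r}"))
            }
            // Same round and batch ID: rebroadcast the cached signature.
            Some((r, b, sig)) if *r == round && *b == batch_id => Ok(sig.clone()),
            // Otherwise: sign and cache, overwriting entries from earlier rounds.
            _ => {
                let sig = format!("sig({author}, {round}, {batch_id})");
                self.0.insert(author.to_string(), (round, batch_id, sig.clone()));
                Ok(sig)
            }
        }
    }
}
// --- End of aside.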
- match self.signed_proposals.write().entry(batch_author) { + match self.signed_proposals.write().0.entry(batch_author) { std::collections::hash_map::Entry::Occupied(mut entry) => { // If the validator has already signed a batch for this round, then return early, // since, if the peer still has not received the signature, they will request it again, @@ -627,7 +781,7 @@ let BatchSignature { batch_id, signature } = batch_signature; // Retrieve the signer. - let signer = spawn_blocking!(Ok(signature.to_address()))?; + let signer = signature.to_address(); // Ensure the batch signature is signed by the validator. if self.gateway.resolver().get_address(peer_ip).map_or(true, |address| address != signer) { @@ -640,16 +794,25 @@ bail!("Invalid peer - received a batch signature from myself ({signer})"); } - let proposal = { + let self_ = self.clone(); + let Some(proposal) = spawn_blocking!({ // Acquire the write lock. - let mut proposed_batch = self.proposed_batch.write(); + let mut proposed_batch = self_.proposed_batch.write(); // Add the signature to the batch, and determine if the batch is ready to be certified. match proposed_batch.as_mut() { Some(proposal) => { // Ensure the batch ID matches the currently proposed batch ID. if proposal.batch_id() != batch_id { - match self.storage.contains_batch(batch_id) { - true => bail!("This batch was already certified"), + match self_.storage.contains_batch(batch_id) { + // If this batch was already certified, return early. + true => { + debug!( + "Primary is safely skipping a batch signature from {peer_ip} for round {} - batch is already certified", + proposal.round() + ); + return Ok(None); + } + // If the batch ID is unknown, return an error. false => bail!( "Unknown batch ID '{batch_id}', expected '{}' for round {}", proposal.batch_id(), @@ -657,44 +820,50 @@ ), } } - // Retrieve the previous committee for the round.
- let previous_committee = self.ledger.get_previous_committee_for_round(proposal.round())?; + // Retrieve the committee lookback for the round. + let committee_lookback = self.ledger.get_committee_lookback_for_round(proposal.round())?; // Store the certified batch and broadcast it to all validators. // If there was an error storing the certificate, reinsert the transmissions back into the ready queue. - if let Err(e) = self.store_and_broadcast_certificate(&proposal, &previous_committee).await { + if let Err(e) = self.store_and_broadcast_certificate(&proposal, &committee_lookback).await { // Reinsert the transmissions back into the ready queue for the next proposal. - self.reinsert_transmissions_into_workers(proposal)?; + self.reinsert_transmissions_into_workers(proposal.into_transmissions())?; return Err(e); } + + #[cfg(feature = "metrics")] + metrics::increment_gauge(metrics::bft::CERTIFIED_BATCHES, 1.0); Ok(()) } @@ -716,6 +885,10 @@ impl Primary { // Retrieve the batch certificate author. let author = certificate.author(); + // Retrieve the batch certificate round. + let certificate_round = certificate.round(); + // Retrieve the batch certificate committee ID. + let committee_id = certificate.committee_id(); // Ensure the batch certificate is from an authorized validator. if !self.gateway.is_authorized_validator_ip(peer_ip) { @@ -731,60 +904,46 @@ impl Primary { // Store the certificate, after ensuring it is valid. self.sync_with_certificate_from_peer(peer_ip, certificate).await?; - // If there are enough certificates to reach quorum threshold for the current round, + // If there are enough certificates to reach quorum threshold for the certificate round, // then proceed to advance to the next round. - // Retrieve the current round. - let current_round = self.current_round(); - // Retrieve the previous committee. - let previous_committee = self.ledger.get_previous_committee_for_round(current_round)?; + // Retrieve the committee lookback. + let committee_lookback = self.ledger.get_committee_lookback_for_round(certificate_round)?; // Retrieve the certificates. - let certificates = self.storage.get_certificates_for_round(current_round); + let certificates = self.storage.get_certificates_for_round(certificate_round); // Construct a set over the authors. let authors = certificates.iter().map(BatchCertificate::author).collect(); // Check if the certificates have reached the quorum threshold. - let is_quorum = previous_committee.is_quorum_threshold_reached(&authors); + let is_quorum = committee_lookback.is_quorum_threshold_reached(&authors); - // Determine if we are currently proposing a round. + // Ensure that the batch certificate's committee ID matches the expected committee ID. + let expected_committee_id = committee_lookback.id(); + if expected_committee_id != committee_id { + // Proceed to disconnect the validator. + self.gateway.disconnect(peer_ip); + bail!("Batch certificate has a different committee ID ({expected_committee_id} != {committee_id})"); + } + + // Determine if we are currently proposing a round that is relevant. // Note: This is important, because while our peers have advanced, // they may not be proposing yet, and thus still able to sign our proposed batch. - let is_proposing = self.proposed_batch.read().is_some(); + let should_advance = match &*self.proposed_batch.read() { + // We advance if the proposal round is less than the current round that was just certified. 
+ Some(proposal) => proposal.round() < certificate_round, + // If there's no proposal, we consider advancing. + None => true, + }; + + // Retrieve the current round. + let current_round = self.current_round(); // Determine whether to advance to the next round. - if is_quorum && !is_proposing { - // If we have reached the quorum threshold, then proceed to the next round. + if is_quorum && should_advance && certificate_round >= current_round { + // If we have reached the quorum threshold and the round should advance, then proceed to the next round. self.try_increment_to_the_next_round(current_round + 1).await?; } Ok(()) } - - /// Processes a batch certificate from a primary ping. - /// - /// This method performs the following steps: - /// 1. Stores the given batch certificate, after ensuring it is valid. - /// 2. If there are enough certificates to reach quorum threshold for the current round, - /// then proceed to advance to the next round. - async fn process_batch_certificate_from_ping( - &self, - peer_ip: SocketAddr, - certificate: BatchCertificate, - ) -> Result<()> { - // Ensure storage does not already contain the certificate. - if self.storage.contains_certificate(certificate.id()) { - return Ok(()); - } - - // Ensure the batch certificate is from an authorized validator. - if !self.gateway.is_authorized_validator_ip(peer_ip) { - // Proceed to disconnect the validator. - self.gateway.disconnect(peer_ip); - bail!("Malicious peer - Received a batch certificate from an unauthorized validator IP ({peer_ip})"); - } - - // Store the certificate, after ensuring it is valid. - self.sync_with_certificate_from_peer(peer_ip, certificate).await?; - Ok(()) - } } impl Primary { @@ -808,7 +967,8 @@ impl Primary { tokio::time::sleep(Duration::from_millis(PRIMARY_PING_IN_MS)).await; // Retrieve the block locators. - let block_locators = match self_.sync.get_block_locators() { + let self__ = self_.clone(); + let block_locators = match spawn_blocking!(self__.sync.get_block_locators()) { Ok(block_locators) => block_locators, Err(e) => { warn!("Failed to retrieve block locators - {e}"); @@ -848,30 +1008,8 @@ impl Primary { } }; - // Retrieve the batch certificates. - let batch_certificates = { - // Retrieve the current round. - let current_round = self_.current_round(); - // Retrieve the batch certificates for the current round. - let mut current_certificates = self_.storage.get_certificates_for_round(current_round); - // If there are no batch certificates for the current round, - // then retrieve the batch certificates for the previous round. - if current_certificates.is_empty() { - // Retrieve the previous round. - let previous_round = current_round.saturating_sub(1); - // Retrieve the batch certificates for the previous round. - current_certificates = self_.storage.get_certificates_for_round(previous_round); - } - current_certificates - }; - // Construct the primary ping. - let primary_ping = PrimaryPing::from(( - >::VERSION, - block_locators, - primary_certificate, - batch_certificates, - )); + let primary_ping = PrimaryPing::from((>::VERSION, block_locators, primary_certificate)); // Broadcast the event. self_.gateway.broadcast(Event::PrimaryPing(primary_ping)); } @@ -881,7 +1019,7 @@ impl Primary { // Start the primary ping handler. 
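// --- Editor's aside (illustration, not part of the diff). The advancement rule
// above combines a stake-weighted quorum over certificate authors with two round
// comparisons. The arithmetic below is a plausible stand-in for
// `Committee::is_quorum_threshold_reached`, not the exact snarkVM formula:
use std::collections::{HashMap, HashSet};

fn is_quorum_threshold_reached(stakes: &HashMap<&str, u64>, authors: &HashSet<&str>) -> bool {
    let total: u64 = stakes.values().sum();
    let signed: u64 = authors.iter().filter_map(|author| stakes.get(author)).sum();
    // Require strictly more than two-thirds of the total stake.
    3 * signed > 2 * total
}

fn should_advance(proposal_round: Option<u64>, certificate_round: u64, current_round: u64, quorum: bool) -> bool {
    // Only advance when any in-flight proposal is older than the certified round,
    // and the certified round is not behind the current round.
    let proposal_is_stale = proposal_round.map_or(true, |round| round < certificate_round);
    quorum && proposal_is_stale && certificate_round >= current_round
}
// --- End of aside.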
let self_ = self.clone(); self.spawn(async move { - while let Some((peer_ip, primary_certificate, batch_certificates)) = rx_primary_ping.recv().await { + while let Some((peer_ip, primary_certificate)) = rx_primary_ping.recv().await { // If the primary is not synced, then do not process the primary ping. if !self_.sync.is_synced() { trace!("Skipping a primary ping from '{peer_ip}' {}", "(node is syncing)".dimmed()); @@ -904,34 +1042,6 @@ impl Primary { } }); } - - // Iterate through the batch certificates. - for (certificate_id, certificate) in batch_certificates { - // Ensure storage does not already contain the certificate. - if self_.storage.contains_certificate(certificate_id) { - continue; - } - // Spawn a task to process the batch certificate. - let self_ = self_.clone(); - tokio::spawn(async move { - // Deserialize the batch certificate in the primary ping. - let Ok(batch_certificate) = spawn_blocking!(certificate.deserialize_blocking()) else { - warn!("Failed to deserialize batch certificate in a 'PrimaryPing' from '{peer_ip}'"); - return; - }; - // Ensure the batch certificate ID matches. - if batch_certificate.id() != certificate_id { - warn!("Batch certificate ID mismatch in a 'PrimaryPing' from '{peer_ip}'"); - // Proceed to disconnect the validator. - self_.gateway.disconnect(peer_ip); - return; - } - // Process the batch certificate. - if let Err(e) = self_.process_batch_certificate_from_ping(peer_ip, batch_certificate).await { - warn!("Cannot process a batch certificate in a 'PrimaryPing' from '{peer_ip}' - {e}"); - } - }); - } } }); @@ -965,6 +1075,12 @@ impl Primary { debug!("Skipping batch proposal {}", "(node is syncing)".dimmed()); continue; } + // A best-effort attempt to skip the scheduled batch proposal if + // round progression already triggered one. + if self_.propose_lock.try_lock().is_err() { + trace!("Skipping batch proposal {}", "(node is already proposing)".dimmed()); + continue; + }; // If there is no proposed batch, attempt to propose a batch. // Note: Do NOT spawn a task around this function call. Proposing a batch is a critical path, // and only one batch needs be proposed at a time. @@ -1039,12 +1155,52 @@ impl Primary { } }); + // Periodically try to increment to the next round. + // Note: This is necessary to ensure that the primary is not stuck on a previous round + // despite having received enough certificates to advance to the next round. + let self_ = self.clone(); + self.spawn(async move { + loop { + // Sleep briefly. + tokio::time::sleep(Duration::from_millis(MAX_BATCH_DELAY_IN_MS)).await; + // If the primary is not synced, then do not increment to the next round. + if !self_.sync.is_synced() { + trace!("Skipping round increment {}", "(node is syncing)".dimmed()); + continue; + } + // Attempt to increment to the next round. + let next_round = self_.current_round().saturating_add(1); + // Determine if the quorum threshold is reached for the current round. + let is_quorum_threshold_reached = { + // Retrieve the certificates for the next round. + let certificates = self_.storage.get_certificates_for_round(next_round); + // If there are no certificates, then skip this check. 
+ if certificates.is_empty() { + continue; + } + let Ok(committee_lookback) = self_.ledger.get_committee_lookback_for_round(next_round) else { + warn!("Failed to retrieve the committee lookback for round {next_round}"); + continue; + }; + let authors = certificates.iter().map(BatchCertificate::author).collect(); + committee_lookback.is_quorum_threshold_reached(&authors) }; + // Attempt to increment to the next round if the quorum threshold is reached. + if is_quorum_threshold_reached { + debug!("Quorum threshold reached for round {}", next_round); + if let Err(e) = self_.try_increment_to_the_next_round(next_round).await { + warn!("Failed to increment to the next round - {e}"); + } + } + } + }); + // Process the unconfirmed solutions. let self_ = self.clone(); self.spawn(async move { - while let Some((puzzle_commitment, prover_solution, callback)) = rx_unconfirmed_solution.recv().await { + while let Some((solution_id, solution, callback)) = rx_unconfirmed_solution.recv().await { // Compute the worker ID. - let Ok(worker_id) = assign_to_worker(puzzle_commitment, self_.num_workers()) else { + let Ok(worker_id) = assign_to_worker(solution_id, self_.num_workers()) else { error!("Unable to determine the worker ID for the unconfirmed solution"); continue; }; @@ -1053,7 +1209,7 @@ // Retrieve the worker. let worker = &self_.workers[worker_id as usize]; // Process the unconfirmed solution. - let result = worker.process_unconfirmed_solution(puzzle_commitment, prover_solution).await; + let result = worker.process_unconfirmed_solution(solution_id, solution).await; // Send the result to the callback. callback.send(result).ok(); }); @@ -1095,7 +1251,8 @@ // Reset the proposed batch. let proposal = self.proposed_batch.write().take(); if let Some(proposal) = proposal { - self.reinsert_transmissions_into_workers(proposal)?; + debug!("Cleared expired proposal for round {}", proposal.round()); + self.reinsert_transmissions_into_workers(proposal.into_transmissions())?; } } Ok(()) @@ -1176,15 +1333,42 @@ Ok(()) } + /// Ensure the primary is not creating batch proposals too frequently. + /// This checks that the certificate timestamp for the previous round is within the expected range. + fn check_proposal_timestamp(&self, previous_round: u64, author: Address<N>, timestamp: i64) -> Result<()> { + // Retrieve the timestamp of the previous certificate to check against. + let previous_timestamp = match self.storage.get_certificate_for_round_with_author(previous_round, author) { + // Ensure that the previous certificate was created at least `MIN_BATCH_DELAY_IN_SECS` seconds ago. + Some(certificate) => certificate.timestamp(), + None => match self.gateway.account().address() == author { + // If we are the author, then ensure the previous proposal was created at least `MIN_BATCH_DELAY_IN_SECS` seconds ago. + true => *self.latest_proposed_batch_timestamp.read(), + // If we do not see a previous certificate for the author, then proceed optimistically. + false => return Ok(()), + }, + }; + + // Determine the elapsed time since the previous timestamp. + let elapsed = timestamp + .checked_sub(previous_timestamp) + .ok_or_else(|| anyhow!("Timestamp cannot be before the previous certificate at round {previous_round}"))?; + // Ensure that the previous certificate was created at least `MIN_BATCH_DELAY_IN_SECS` seconds ago.
+ match elapsed < MIN_BATCH_DELAY_IN_SECS as i64 { + true => bail!("Timestamp is too soon after the previous certificate at round {previous_round}"), + false => Ok(()), + } + } + /// Stores the certified batch and broadcasts it to all validators, returning the certificate. async fn store_and_broadcast_certificate(&self, proposal: &Proposal, committee: &Committee) -> Result<()> { // Create the batch certificate and transmissions. - let (certificate, transmissions) = proposal.to_certificate(committee)?; + let (certificate, transmissions) = tokio::task::block_in_place(|| proposal.to_certificate(committee))?; // Convert the transmissions into a HashMap. // Note: Do not change the `Proposal` to use a HashMap. The ordering there is necessary for safety. let transmissions = transmissions.into_iter().collect::>(); // Store the certified batch. - self.storage.insert_certificate(certificate.clone(), transmissions)?; + let (storage, certificate_) = (self.storage.clone(), certificate.clone()); + spawn_blocking!(storage.insert_certificate(certificate_, transmissions, Default::default()))?; debug!("Stored a batch certificate for round {}", certificate.round()); // If a BFT sender was provided, send the certificate to the BFT. if let Some(bft_sender) = self.bft_sender.get() { @@ -1217,15 +1401,14 @@ impl Primary { } /// Re-inserts the transmissions from the proposal into the workers. - fn reinsert_transmissions_into_workers(&self, proposal: Proposal) -> Result<()> { + fn reinsert_transmissions_into_workers( + &self, + transmissions: IndexMap, Transmission>, + ) -> Result<()> { // Re-insert the transmissions into the workers. - assign_to_workers( - &self.workers, - proposal.into_transmissions().into_iter(), - |worker, transmission_id, transmission| { - worker.reinsert(transmission_id, transmission); - }, - ) + assign_to_workers(&self.workers, transmissions.into_iter(), |worker, transmission_id, transmission| { + worker.reinsert(transmission_id, transmission); + }) } /// Recursively stores a given batch certificate, after ensuring: @@ -1263,7 +1446,8 @@ impl Primary { // Check if the certificate needs to be stored. if !self.storage.contains_certificate(certificate.id()) { // Store the batch certificate. - self.storage.insert_certificate(certificate.clone(), missing_transmissions)?; + let (storage, certificate_) = (self.storage.clone(), certificate.clone()); + spawn_blocking!(storage.insert_certificate(certificate_, missing_transmissions, Default::default()))?; debug!("Stored a batch certificate for round {batch_round} from '{peer_ip}'"); // If a BFT sender was provided, send the round and certificate to the BFT. if let Some(bft_sender) = self.bft_sender.get() { @@ -1295,8 +1479,8 @@ impl Primary { let is_quorum_threshold_reached = { let certificates = self.storage.get_certificates_for_round(batch_round); let authors = certificates.iter().map(BatchCertificate::author).collect(); - let previous_committee = self.ledger.get_previous_committee_for_round(batch_round)?; - previous_committee.is_quorum_threshold_reached(&authors) + let committee_lookback = self.ledger.get_committee_lookback_for_round(batch_round)?; + committee_lookback.is_quorum_threshold_reached(&authors) }; // Check if our primary should move to the next round. 
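// --- Editor's aside (illustration, not part of the diff). The rate limit in
// `check_proposal_timestamp` above reduces to a gap check between two UNIX
// timestamps; `anyhow` is assumed for the error type, as in the surrounding code:
use anyhow::{anyhow, bail, Result};

const MIN_BATCH_DELAY_IN_SECS: i64 = 1; // matches the constant added in lib.rs

fn check_proposal_gap(previous_timestamp: i64, timestamp: i64) -> Result<()> {
    // `checked_sub` only guards against i64 overflow; a negative gap (a proposal
    // timestamped before its predecessor) is caught by the delay check below.
    let elapsed = timestamp
        .checked_sub(previous_timestamp)
        .ok_or_else(|| anyhow!("timestamp arithmetic overflowed"))?;
    if elapsed < MIN_BATCH_DELAY_IN_SECS {
        bail!("timestamp is too soon after the previous certificate");
    }
    Ok(())
}
// --- End of aside.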
@@ -1317,18 +1501,7 @@ impl Primary { self.fetch_missing_previous_certificates(peer_ip, batch_header).await.map_err(|e| { anyhow!("Failed to fetch missing previous certificates for round {batch_round} from '{peer_ip}' - {e}") })?; - // Ensure the primary has all of the election certificates. - let missing_election_certificates = match self.fetch_missing_election_certificates(peer_ip, batch_header).await - { - Ok(missing_election_certificates) => missing_election_certificates, - Err(e) => { - // TODO (howardwu): Change this to return early, once we have persistence on the election certificates. - error!("Failed to fetch missing election certificates for round {batch_round} from '{peer_ip}' - {e}"); - // Note: We do not return early on error, because we can still proceed without the election certificates, - // albeit with reduced safety guarantees for commits. This is not a long-term solution. - Default::default() - } - }; + // Ensure the primary has all of the transmissions. let missing_transmissions = self.fetch_missing_transmissions(peer_ip, batch_header).await.map_err(|e| { anyhow!("Failed to fetch missing transmissions for round {batch_round} from '{peer_ip}' - {e}") @@ -1339,11 +1512,6 @@ impl Primary { // Store the batch certificate (recursively fetching any missing previous certificates). self.sync_with_certificate_from_peer(peer_ip, batch_certificate).await?; } - // Iterate through the missing election certificates. - for batch_certificate in missing_election_certificates { - // Store the batch certificate (recursively fetching any missing previous certificates). - self.sync_with_certificate_from_peer(peer_ip, batch_certificate).await?; - } Ok(missing_transmissions) } @@ -1359,9 +1527,10 @@ impl Primary { return Ok(Default::default()); } - // Ensure this batch ID is new. + // Ensure this batch ID is new, otherwise return early. if self.storage.contains_batch(batch_header.batch_id()) { - bail!("Batch for round {} from peer has already been processed", batch_header.round()) + trace!("Batch for round {} from peer has already been processed", batch_header.round()); + return Ok(Default::default()); } // Retrieve the workers. @@ -1426,32 +1595,6 @@ impl Primary { Ok(missing_previous_certificates) } - /// Fetches any missing election certificates for the specified batch header from the specified peer. - async fn fetch_missing_election_certificates( - &self, - peer_ip: SocketAddr, - batch_header: &BatchHeader, - ) -> Result>> { - // Retrieve the round. - let round = batch_header.round(); - // If the previous round is 0, or is <= the GC round, return early. - if round == 1 || round <= self.storage.gc_round() + 1 { - return Ok(Default::default()); - } - - // Fetch the missing election certificates. - let missing_election_certificates = - self.fetch_missing_certificates(peer_ip, round, batch_header.last_election_certificate_ids()).await?; - if !missing_election_certificates.is_empty() { - debug!( - "Fetched {} missing election certificates for round {round} from '{peer_ip}'", - missing_election_certificates.len(), - ); - } - // Return the missing election certificates. - Ok(missing_election_certificates) - } - /// Fetches any missing certificates for the specified batch header from the specified peer. async fn fetch_missing_certificates( &self, @@ -1510,6 +1653,17 @@ impl Primary { self.workers.iter().for_each(|worker| worker.shut_down()); // Abort the tasks. self.handles.lock().iter().for_each(|handle| handle.abort()); + // Save the current proposal cache to disk. 
+ let proposal_cache = { + let proposal = self.proposed_batch.write().take(); + let signed_proposals = self.signed_proposals.read().clone(); + let latest_round = proposal.as_ref().map(Proposal::round).unwrap_or(*self.propose_lock.lock().await); + let pending_certificates = self.storage.get_pending_certificates(); + ProposalCache::new(latest_round, proposal, signed_proposals, pending_certificates) + }; + if let Err(err) = proposal_cache.store(self.gateway.dev()) { + error!("Failed to store the current proposal cache: {err}"); + } // Close the gateway. self.gateway.shut_down().await; } @@ -1529,7 +1683,7 @@ mod tests { use indexmap::IndexSet; use rand::RngCore; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; // Returns a primary and a list of accounts in the configured committee. async fn primary_without_handlers( @@ -1575,20 +1729,17 @@ mod tests { } // Creates a mock solution. - fn sample_unconfirmed_solution( - rng: &mut TestRng, - ) -> (PuzzleCommitment, Data>) { - // Sample a random fake puzzle commitment. - let affine = rng.gen(); - let commitment = PuzzleCommitment::::from_g1_affine(affine); + fn sample_unconfirmed_solution(rng: &mut TestRng) -> (SolutionID, Data>) { + // Sample a random fake solution ID. + let solution_id = rng.gen::().into(); // Vary the size of the solutions. let size = rng.gen_range(1024..10 * 1024); // Sample random fake solution bytes. let mut vec = vec![0u8; size]; rng.fill_bytes(&mut vec); let solution = Data::Buffer(Bytes::from(vec)); - // Return the ID and solution. - (commitment, solution) + // Return the solution ID and solution. + (solution_id, solution) } // Creates a mock transaction. @@ -1616,15 +1767,15 @@ mod tests { timestamp: i64, rng: &mut TestRng, ) -> Proposal { - let (solution_commitment, solution) = sample_unconfirmed_solution(rng); + let (solution_id, solution) = sample_unconfirmed_solution(rng); let (transaction_id, transaction) = sample_unconfirmed_transaction(rng); // Retrieve the private key. let private_key = author.private_key(); // Prepare the transmission IDs. 
- let transmission_ids = [solution_commitment.into(), (&transaction_id).into()].into(); + let transmission_ids = [solution_id.into(), (&transaction_id).into()].into(); let transmissions = [ - (solution_commitment.into(), Transmission::Solution(solution)), + (solution_id.into(), Transmission::Solution(solution)), ((&transaction_id).into(), Transmission::Transaction(transaction)), ] .into(); @@ -1633,9 +1784,9 @@ mod tests { private_key, round, timestamp, + committee.id(), transmission_ids, previous_certificate_ids, - Default::default(), rng, ) .unwrap(); @@ -1696,11 +1847,12 @@ mod tests { accounts.iter().find(|&(_, acct)| acct.address() == primary_address).map(|(_, acct)| acct.clone()).unwrap(); let private_key = author.private_key(); - let (solution_commitment, solution) = sample_unconfirmed_solution(rng); + let committee_id = Field::rand(rng); + let (solution_id, solution) = sample_unconfirmed_solution(rng); let (transaction_id, transaction) = sample_unconfirmed_transaction(rng); - let transmission_ids = [solution_commitment.into(), (&transaction_id).into()].into(); + let transmission_ids = [solution_id.into(), (&transaction_id).into()].into(); let transmissions = [ - (solution_commitment.into(), Transmission::Solution(solution)), + (solution_id.into(), Transmission::Solution(solution)), ((&transaction_id).into(), Transmission::Transaction(transaction)), ] .into(); @@ -1709,9 +1861,9 @@ mod tests { private_key, round, timestamp, + committee_id, transmission_ids, previous_certificate_ids, - Default::default(), rng, ) .unwrap(); @@ -1739,7 +1891,7 @@ mod tests { rng, ); next_certificates.insert(certificate.id()); - assert!(primary.storage.insert_certificate(certificate, transmissions).is_ok()); + assert!(primary.storage.insert_certificate(certificate, transmissions, Default::default()).is_ok()); } assert!(primary.storage.increment_to_next_round(cur_round).is_ok()); @@ -1767,17 +1919,12 @@ mod tests { // Check there is no batch currently proposed. assert!(primary.proposed_batch.read().is_none()); - // Try to propose a batch. There are no transmissions in the workers so the method should - // just return without proposing a batch. - assert!(primary.propose_batch().await.is_ok()); - assert!(primary.proposed_batch.read().is_none()); - // Generate a solution and a transaction. - let (solution_commitment, solution) = sample_unconfirmed_solution(&mut rng); + let (solution_id, solution) = sample_unconfirmed_solution(&mut rng); let (transaction_id, transaction) = sample_unconfirmed_transaction(&mut rng); // Store it on one of the workers. - primary.workers[0].process_unconfirmed_solution(solution_commitment, solution).await.unwrap(); + primary.workers[0].process_unconfirmed_solution(solution_id, solution).await.unwrap(); primary.workers[0].process_unconfirmed_transaction(transaction_id, transaction).await.unwrap(); // Try to propose a batch again. This time, it should succeed. @@ -1785,6 +1932,19 @@ mod tests { assert!(primary.proposed_batch.read().is_some()); } + #[tokio::test] + async fn test_propose_batch_with_no_transmissions() { + let mut rng = TestRng::default(); + let (primary, _) = primary_without_handlers(&mut rng).await; + + // Check there is no batch currently proposed. + assert!(primary.proposed_batch.read().is_none()); + + // Try to propose a batch with no transmissions. 
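+ // Empty batches are now permitted, so a proposal is expected even with no transmissions in the workers.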
+ assert!(primary.propose_batch().await.is_ok()); + assert!(primary.proposed_batch.read().is_some()); + } + #[tokio::test] async fn test_propose_batch_in_round() { let round = 3; @@ -1794,10 +1954,40 @@ mod tests { // Fill primary storage. store_certificate_chain(&primary, &accounts, round, &mut rng); - // Try to propose a batch. There are no transmissions in the workers so the method should - // just return without proposing a batch. + // Sleep for a while to ensure the primary is ready to propose the next round. + tokio::time::sleep(Duration::from_secs(MIN_BATCH_DELAY_IN_SECS)).await; + + // Generate a solution and a transaction. + let (solution_id, solution) = sample_unconfirmed_solution(&mut rng); + let (transaction_id, transaction) = sample_unconfirmed_transaction(&mut rng); + + // Store it on one of the workers. + primary.workers[0].process_unconfirmed_solution(solution_id, solution).await.unwrap(); + primary.workers[0].process_unconfirmed_transaction(transaction_id, transaction).await.unwrap(); + + // Propose a batch again. This time, it should succeed. assert!(primary.propose_batch().await.is_ok()); - assert!(primary.proposed_batch.read().is_none()); + assert!(primary.proposed_batch.read().is_some()); + } + + #[tokio::test] + async fn test_propose_batch_skip_transmissions_from_previous_certificates() { + let round = 3; + let prev_round = round - 1; + let mut rng = TestRng::default(); + let (primary, accounts) = primary_without_handlers(&mut rng).await; + let peer_account = &accounts[1]; + let peer_ip = peer_account.0; + + // Fill primary storage. + store_certificate_chain(&primary, &accounts, round, &mut rng); + + // Get transmissions from previous certificates. + let previous_certificate_ids: IndexSet<_> = + primary.storage.get_certificates_for_round(prev_round).iter().map(|cert| cert.id()).collect(); + + // Track the number of transmissions in the previous round. + let mut num_transmissions_in_previous_round = 0; // Generate a solution and a transaction. let (solution_commitment, solution) = sample_unconfirmed_solution(&mut rng); @@ -1807,9 +1997,46 @@ mod tests { primary.workers[0].process_unconfirmed_solution(solution_commitment, solution).await.unwrap(); primary.workers[0].process_unconfirmed_transaction(transaction_id, transaction).await.unwrap(); - // Propose a batch again. This time, it should succeed. + // Check that the worker has 2 transmissions. + assert_eq!(primary.workers[0].num_transmissions(), 2); + + // Create certificates for the current round and add the transmissions to the worker before inserting the certificate to storage. + for (_, account) in accounts.iter() { + let (certificate, transmissions) = create_batch_certificate( + account.address(), + &accounts, + round, + previous_certificate_ids.clone(), + &mut rng, + ); + + // Add the transmissions to the worker. + for (transmission_id, transmission) in transmissions.iter() { + primary.workers[0].process_transmission_from_peer(peer_ip, *transmission_id, transmission.clone()); + } + + // Insert the certificate to storage. + num_transmissions_in_previous_round += transmissions.len(); + primary.storage.insert_certificate(certificate, transmissions, Default::default()).unwrap(); + } + + // Sleep for a while to ensure the primary is ready to propose the next round. + tokio::time::sleep(Duration::from_secs(MIN_BATCH_DELAY_IN_SECS)).await; + + // Advance to the next round. 
+ assert!(primary.storage.increment_to_next_round(round).is_ok()); + + // Check that the worker has `num_transmissions_in_previous_round + 2` transmissions. + assert_eq!(primary.workers[0].num_transmissions(), num_transmissions_in_previous_round + 2); + + // Propose the batch. assert!(primary.propose_batch().await.is_ok()); - assert!(primary.proposed_batch.read().is_some()); + + // Check that the proposal only contains the new transmissions that were not in previous certificates. + let proposed_transmissions = primary.proposed_batch.read().as_ref().unwrap().transmissions().clone(); + assert_eq!(proposed_transmissions.len(), 2); + assert!(proposed_transmissions.contains_key(&TransmissionID::Solution(solution_commitment))); + assert!(proposed_transmissions.contains_key(&TransmissionID::Transaction(transaction_id))); } #[tokio::test] @@ -1821,7 +2048,7 @@ mod tests { let round = 1; let peer_account = &accounts[1]; let peer_ip = peer_account.0; - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( &peer_account.1, primary.ledger.current_committee().unwrap(), @@ -1857,7 +2084,7 @@ mod tests { // Create a valid proposal with an author that isn't the primary. let peer_account = &accounts[1]; let peer_ip = peer_account.0; - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( &peer_account.1, primary.ledger.current_committee().unwrap(), @@ -1888,7 +2115,7 @@ mod tests { let round = 1; let peer_account = &accounts[1]; let peer_ip = peer_account.0; - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( &peer_account.1, primary.ledger.current_committee().unwrap(), @@ -1930,7 +2157,7 @@ mod tests { // Create a valid proposal with an author that isn't the primary. let peer_account = &accounts[1]; let peer_ip = peer_account.0; - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( &peer_account.1, primary.ledger.current_committee().unwrap(), @@ -1961,6 +2188,140 @@ mod tests { } #[tokio::test] + async fn test_batch_propose_from_peer_with_invalid_timestamp() { + let round = 2; + let mut rng = TestRng::default(); + let (primary, accounts) = primary_without_handlers(&mut rng).await; + + // Generate certificates. + let previous_certificates = store_certificate_chain(&primary, &accounts, round, &mut rng); + + // Create a valid proposal with an author that isn't the primary. + let peer_account = &accounts[1]; + let peer_ip = peer_account.0; + let invalid_timestamp = now(); // Use a timestamp that is too early. + let proposal = create_test_proposal( + &peer_account.1, + primary.ledger.current_committee().unwrap(), + round, + previous_certificates, + invalid_timestamp, + &mut rng, + ); + + // Make sure the primary is aware of the transmissions in the proposal. + for (transmission_id, transmission) in proposal.transmissions() { + primary.workers[0].process_transmission_from_peer(peer_ip, *transmission_id, transmission.clone()) + } + + // The author must be known to resolver to pass propose checks. + primary.gateway.resolver().insert_peer(peer_ip, peer_ip, peer_account.1.address()); + + // Try to process the batch proposal from the peer, should error. 
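+ // A timestamp of `now()` is earlier than the minimum acceptable batch timestamp, so the proposal should be rejected.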
+ assert!( + primary.process_batch_propose_from_peer(peer_ip, (*proposal.batch_header()).clone().into()).await.is_err() + ); + } + + #[tokio::test] + async fn test_batch_propose_from_peer_with_past_timestamp() { + let round = 2; + let mut rng = TestRng::default(); + let (primary, accounts) = primary_without_handlers(&mut rng).await; + + // Generate certificates. + let previous_certificates = store_certificate_chain(&primary, &accounts, round, &mut rng); + + // Create a valid proposal with an author that isn't the primary. + let peer_account = &accounts[1]; + let peer_ip = peer_account.0; + let past_timestamp = now() - 5; // Use a timestamp that is in the past. + let proposal = create_test_proposal( + &peer_account.1, + primary.ledger.current_committee().unwrap(), + round, + previous_certificates, + past_timestamp, + &mut rng, + ); + + // Make sure the primary is aware of the transmissions in the proposal. + for (transmission_id, transmission) in proposal.transmissions() { + primary.workers[0].process_transmission_from_peer(peer_ip, *transmission_id, transmission.clone()) + } + + // The author must be known to the resolver to pass the propose checks. + primary.gateway.resolver().insert_peer(peer_ip, peer_ip, peer_account.1.address()); + + // Try to process the batch proposal from the peer, should error. + assert!( + primary.process_batch_propose_from_peer(peer_ip, (*proposal.batch_header()).clone().into()).await.is_err() + ); + } + + #[tokio::test] + async fn test_propose_batch_with_storage_round_behind_proposal_lock() { + let round = 3; + let mut rng = TestRng::default(); + let (primary, _) = primary_without_handlers(&mut rng).await; + + // Check there is no batch currently proposed. + assert!(primary.proposed_batch.read().is_none()); + + // Generate a solution and a transaction. + let (solution_id, solution) = sample_unconfirmed_solution(&mut rng); + let (transaction_id, transaction) = sample_unconfirmed_transaction(&mut rng); + + // Store it on one of the workers. + primary.workers[0].process_unconfirmed_solution(solution_id, solution).await.unwrap(); + primary.workers[0].process_unconfirmed_transaction(transaction_id, transaction).await.unwrap(); + + // Set the proposal lock to a round ahead of the storage. + let old_proposal_lock_round = *primary.propose_lock.lock().await; + *primary.propose_lock.lock().await = round + 1; + + // Try to propose a batch, and ensure that no batch is proposed. + assert!(primary.propose_batch().await.is_ok()); + assert!(primary.proposed_batch.read().is_none()); + + // Set the proposal lock back to the old round. + *primary.propose_lock.lock().await = old_proposal_lock_round; + + // Try to propose a batch again. This time, it should succeed. + assert!(primary.propose_batch().await.is_ok()); + assert!(primary.proposed_batch.read().is_some()); + } + + #[tokio::test] + async fn test_propose_batch_with_storage_round_behind_proposal() { + let round = 5; + let mut rng = TestRng::default(); + let (primary, accounts) = primary_without_handlers(&mut rng).await; + + // Generate previous certificates. + let previous_certificates = store_certificate_chain(&primary, &accounts, round, &mut rng); + + // Create a valid proposal. + let timestamp = now(); + let proposal = create_test_proposal( + primary.gateway.account(), + primary.ledger.current_committee().unwrap(), + round + 1, + previous_certificates, + timestamp, + &mut rng, + ); + + // Store the proposal on the primary.
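+ // The stored proposal targets `round + 1` while the storage is still at `round`, emulating a primary whose storage lags behind its own proposal.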
+ *primary.proposed_batch.write() = Some(proposal); + + // Trying to propose a batch should terminate early, because the storage is behind the proposal. + assert!(primary.propose_batch().await.is_ok()); + assert!(primary.proposed_batch.read().is_some()); + assert!(primary.proposed_batch.read().as_ref().unwrap().round() > primary.current_round()); + } + + #[tokio::test(flavor = "multi_thread")] async fn test_batch_signature_from_peer() { let mut rng = TestRng::default(); let (primary, accounts) = primary_without_handlers(&mut rng).await; @@ -1968,7 +2329,7 @@ mod tests { // Create a valid proposal. let round = 1; - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( primary.gateway.account(), primary.ledger.current_committee().unwrap(), @@ -1995,7 +2356,7 @@ mod tests { assert_eq!(primary.current_round(), round + 1); } - #[tokio::test] + #[tokio::test(flavor = "multi_thread")] async fn test_batch_signature_from_peer_in_round() { let round = 5; let mut rng = TestRng::default(); @@ -2041,7 +2402,7 @@ mod tests { // Create a valid proposal. let round = 1; - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( primary.gateway.account(), primary.ledger.current_committee().unwrap(), @@ -2078,7 +2439,7 @@ mod tests { let previous_certificates = store_certificate_chain(&primary, &accounts, round, &mut rng); // Create a valid proposal. - let timestamp = now(); + let timestamp = now() + MIN_BATCH_DELAY_IN_SECS as i64; let proposal = create_test_proposal( primary.gateway.account(), primary.ledger.current_committee().unwrap(), @@ -2103,4 +2464,87 @@ mod tests { // Check the round was incremented. assert_eq!(primary.current_round(), round); } + + #[tokio::test] + async fn test_insert_certificate_with_aborted_transmissions() { + let round = 3; + let prev_round = round - 1; + let mut rng = TestRng::default(); + let (primary, accounts) = primary_without_handlers(&mut rng).await; + let peer_account = &accounts[1]; + let peer_ip = peer_account.0; + + // Fill primary storage. + store_certificate_chain(&primary, &accounts, round, &mut rng); + + // Get transmissions from previous certificates. + let previous_certificate_ids: IndexSet<_> = + primary.storage.get_certificates_for_round(prev_round).iter().map(|cert| cert.id()).collect(); + + // Generate a solution and a transaction. + let (solution_commitment, solution) = sample_unconfirmed_solution(&mut rng); + let (transaction_id, transaction) = sample_unconfirmed_transaction(&mut rng); + + // Store it on one of the workers. + primary.workers[0].process_unconfirmed_solution(solution_commitment, solution).await.unwrap(); + primary.workers[0].process_unconfirmed_transaction(transaction_id, transaction).await.unwrap(); + + // Check that the worker has 2 transmissions. + assert_eq!(primary.workers[0].num_transmissions(), 2); + + // Create certificates for the current round. + let account = accounts[0].1.clone(); + let (certificate, transmissions) = + create_batch_certificate(account.address(), &accounts, round, previous_certificate_ids.clone(), &mut rng); + let certificate_id = certificate.id(); + + // Randomly abort some of the transmissions. + let mut aborted_transmissions = HashSet::new(); + let mut transmissions_without_aborted = HashMap::new(); + for (transmission_id, transmission) in transmissions.clone() { + match rng.gen::<bool>() || aborted_transmissions.is_empty() { + true => { + // Record the transmission as aborted.
+ aborted_transmissions.insert(transmission_id); + } + false => { + // Insert the transmission into the set of non-aborted transmissions. + transmissions_without_aborted.insert(transmission_id, transmission); + } + }; + } + + // Add the non-aborted transmissions to the worker. + for (transmission_id, transmission) in transmissions_without_aborted.iter() { + primary.workers[0].process_transmission_from_peer(peer_ip, *transmission_id, transmission.clone()); + } + + // Check that inserting the certificate with missing transmissions fails. + assert!( + primary + .storage + .check_certificate(&certificate, transmissions_without_aborted.clone(), Default::default()) + .is_err() + ); + assert!( + primary + .storage + .insert_certificate(certificate.clone(), transmissions_without_aborted.clone(), Default::default()) + .is_err() + ); + + // Insert the certificate into storage. + primary + .storage + .insert_certificate(certificate, transmissions_without_aborted, aborted_transmissions.clone()) + .unwrap(); + + // Ensure the certificate exists in storage. + assert!(primary.storage.contains_certificate(certificate_id)); + // Ensure that the aborted transmission IDs exist in storage. + for aborted_transmission_id in aborted_transmissions { + assert!(primary.storage.contains_transmission(aborted_transmission_id)); + assert!(primary.storage.get_transmission(aborted_transmission_id).is_none()); + } + } } diff --git a/node/bft/src/sync/mod.rs b/node/bft/src/sync/mod.rs index 517dbca5eb..e5dc84731b 100644 --- a/node/bft/src/sync/mod.rs +++ b/node/bft/src/sync/mod.rs @@ -13,10 +13,11 @@ // limitations under the License. use crate::{ - helpers::{BFTSender, Pending, Storage, SyncReceiver}, + helpers::{fmt_id, max_redundant_requests, BFTSender, Pending, Storage, SyncReceiver}, + spawn_blocking, Gateway, Transport, - MAX_BATCH_DELAY_IN_MS, + MAX_FETCH_TIMEOUT_IN_MS, PRIMARY_PING_IN_MS, }; use snarkos_node_bft_events::{CertificateRequest, CertificateResponse, Event}; @@ -25,11 +26,13 @@ use snarkos_node_sync::{locators::BlockLocators, BlockSync, BlockSyncMode}; use snarkvm::{ console::{network::Network, types::Field}, ledger::{authority::Authority, block::Block, narwhal::BatchCertificate}, + prelude::{cfg_into_iter, cfg_iter}, }; use anyhow::{bail, Result}; use parking_lot::Mutex; -use std::{future::Future, net::SocketAddr, sync::Arc}; +use rayon::prelude::*; +use std::{collections::HashMap, future::Future, net::SocketAddr, sync::Arc, time::Duration}; use tokio::{ sync::{oneshot, Mutex as TMutex, OnceCell}, task::JoinHandle, @@ -51,8 +54,12 @@ pub struct Sync { bft_sender: Arc<OnceCell<BFTSender<N>>>, /// The spawned handles. handles: Arc<Mutex<Vec<JoinHandle<()>>>>, + /// The response lock. + response_lock: Arc<TMutex<()>>, /// The sync lock. - lock: Arc<TMutex<()>>, + sync_lock: Arc<TMutex<()>>, + /// The latest block responses. + latest_block_responses: Arc<TMutex<HashMap<u32, Block<N>>>>, } impl Sync { @@ -69,12 +76,14 @@ impl Sync { pending: Default::default(), bft_sender: Default::default(), handles: Default::default(), - lock: Default::default(), + response_lock: Default::default(), + sync_lock: Default::default(), + latest_block_responses: Default::default(), } } - /// Starts the sync module. - pub async fn run(&self, bft_sender: Option<BFTSender<N>>, sync_receiver: SyncReceiver<N>) -> Result<()> { + /// Initializes the sync module and syncs the storage with the ledger at bootup. + pub async fn initialize(&self, bft_sender: Option<BFTSender<N>>) -> Result<()> { // If a BFT sender was provided, set it.
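+ // Note: `initialize` now performs only the one-time bootup sync, while `run` (below) starts the long-running sync loops.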
if let Some(bft_sender) = bft_sender { self.bft_sender.set(bft_sender).expect("BFT sender already set in gateway"); @@ -83,23 +92,57 @@ impl Sync { info!("Syncing storage with the ledger..."); // Sync the storage with the ledger. - self.sync_storage_with_ledger_at_bootup().await?; + self.sync_storage_with_ledger_at_bootup().await + } + /// Starts the sync module. + pub async fn run(&self, sync_receiver: SyncReceiver<N>) -> Result<()> { info!("Starting the sync module..."); // Start the block sync loop. let self_ = self.clone(); self.handles.lock().push(tokio::spawn(async move { + // Sleep briefly to allow an initial primary ping to come in prior to entering the loop. + // Ideally, a node does not consider itself synced when it has not received + // any block locators from peers. However, in the initial bootup of validators, + // this needs to happen, so we use this additional sleep as a grace period. + tokio::time::sleep(Duration::from_millis(PRIMARY_PING_IN_MS)).await; loop { // Sleep briefly to avoid triggering spam detection. - tokio::time::sleep(std::time::Duration::from_millis(PRIMARY_PING_IN_MS)).await; + tokio::time::sleep(Duration::from_millis(PRIMARY_PING_IN_MS)).await; // Perform the sync routine. let communication = &self_.gateway; // let communication = &node.router; self_.block_sync.try_block_sync(communication).await; + + // Sync the storage with the blocks. + if let Err(e) = self_.sync_storage_with_blocks().await { + error!("Unable to sync storage with blocks - {e}"); + } + + // If the node is synced, clear the `latest_block_responses`. + if self_.is_synced() { + self_.latest_block_responses.lock().await.clear(); + } } })); + // Start the pending queue expiration loop. + let self_ = self.clone(); + self.spawn(async move { + loop { + // Sleep briefly. + tokio::time::sleep(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS)).await; + + // Remove the expired pending certificate requests. + let self__ = self_.clone(); + let _ = spawn_blocking!({ + self__.pending.clear_expired_callbacks(); + Ok(()) + }); + } + }); + // Retrieve the sync receiver. let SyncReceiver { mut rx_block_sync_advance_with_sync_blocks, @@ -183,14 +226,16 @@ impl Sync { // Retrieve the block height. let block_height = latest_block.height(); + // Determine the maximum number of blocks that would have been garbage collected. + let max_gc_blocks = u32::try_from(self.storage.max_gc_rounds())?.saturating_div(2); // Determine the earliest height, conservatively set to the block height minus the max GC blocks. // By virtue of the BFT protocol, we can guarantee that all GC range blocks will be loaded. - let gc_height = block_height.saturating_sub(u32::try_from(self.storage.max_gc_rounds())?); + let gc_height = block_height.saturating_sub(max_gc_blocks); // Retrieve the blocks. let blocks = self.ledger.get_blocks(gc_height..block_height.saturating_add(1))?; // Acquire the sync lock. - let _lock = self.lock.lock().await; + let _lock = self.sync_lock.lock().await; debug!("Syncing storage with the ledger from block {} to {}...", gc_height, block_height.saturating_add(1)); @@ -200,38 +245,30 @@ impl Sync { self.storage.sync_height_with_block(latest_block.height()); // Sync the round with the block. self.storage.sync_round_with_block(latest_block.round()); + // Perform GC on the latest block round. + self.storage.garbage_collect_certificates(latest_block.round()); // Iterate over the blocks. for block in &blocks { // If the block authority is a subdag, then sync the batch certificates with the block.
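+ // Note: beacon blocks carry no batch certificates, so only quorum (subdag) blocks contribute certificates to storage here.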
if let Authority::Quorum(subdag) = block.authority() { + // Reconstruct the unconfirmed transactions. + let unconfirmed_transactions = cfg_iter!(block.transactions()) + .filter_map(|tx| { + tx.to_unconfirmed_transaction().map(|unconfirmed| (unconfirmed.id(), unconfirmed)).ok() + }) + .collect::<HashMap<_, _>>(); + // Iterate over the certificates. - for certificate in subdag.values().flatten() { - // Sync the batch certificate with the block. - self.storage.sync_certificate_with_block(block, certificate); + for certificates in subdag.values().cloned() { + cfg_into_iter!(certificates).for_each(|certificate| { + self.storage.sync_certificate_with_block(block, certificate, &unconfirmed_transactions); + }); + } } } /* Sync the BFT DAG */ - // Retrieve the leader certificates. - let leader_certificates = blocks - .iter() - .flat_map(|block| { - match block.authority() { - // If the block authority is a beacon, then skip the block. - Authority::Beacon(_) => None, - // If the block authority is a subdag, then retrieve the certificates. - Authority::Quorum(subdag) => { - Some((subdag.leader_certificate().clone(), subdag.election_certificate_ids().clone())) - } - } - }) - .collect::<Vec<_>>(); - if leader_certificates.is_empty() { - return Ok(()); - } - // Construct a list of the certificates. let certificates = blocks .iter() @@ -246,10 +283,10 @@ impl Sync { .flatten() .collect::<Vec<_>>(); - // If a BFT sender was provided, send the certificate to the BFT. + // If a BFT sender was provided, send the certificates to the BFT. if let Some(bft_sender) = self.bft_sender.get() { // Await the callback to continue. - if let Err(e) = bft_sender.tx_sync_bft_dag_at_bootup.send((leader_certificates, certificates)).await { + if let Err(e) = bft_sender.tx_sync_bft_dag_at_bootup.send(certificates).await { bail!("Failed to update the BFT DAG from sync: {e}"); } } @@ -259,8 +296,36 @@ impl Sync { /// Syncs the storage with the given blocks. pub async fn sync_storage_with_blocks(&self) -> Result<()> { + // Acquire the response lock. + let _lock = self.response_lock.lock().await; + // Retrieve the latest block height. let mut current_height = self.ledger.latest_block_height() + 1; + + // Retrieve the maximum block height of the peers. + let tip = self.block_sync.find_sync_peers().map(|(x, _)| x.into_values().max().unwrap_or(0)).unwrap_or(0); + // Determine the maximum number of blocks that would have been garbage collected. + let max_gc_blocks = u32::try_from(self.storage.max_gc_rounds())?.saturating_div(2); + // Determine the maximum height that the peer would have garbage collected. + let max_gc_height = tip.saturating_sub(max_gc_blocks); + + // Determine if we can sync the ledger without updating the BFT first. + if current_height <= max_gc_height { + // Try to advance the ledger *to tip* without updating the BFT. + while let Some(block) = self.block_sync.process_next_block(current_height) { + info!("Syncing the ledger to block {}...", block.height()); + self.sync_ledger_with_block_without_bft(block).await?; + // Update the current height. + current_height += 1; + } + // Sync the storage with the ledger if we should transition to the BFT sync. + if current_height > max_gc_height { + if let Err(e) = self.sync_storage_with_ledger_at_bootup().await { + error!("BFT sync (with bootup routine) failed - {e}"); + } + } + } + // Try to advance the ledger with sync blocks.
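+ // Any remaining blocks are close enough to the tip that their certificates are still available, so they are routed through the BFT via `sync_storage_with_block` below.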
while let Some(block) = self.block_sync.process_next_block(current_height) { info!("Syncing the BFT to block {}...", block.height()); @@ -272,34 +337,200 @@ impl Sync { Ok(()) } + /// Syncs the ledger with the given block without updating the BFT. + async fn sync_ledger_with_block_without_bft(&self, block: Block<N>) -> Result<()> { + // Acquire the sync lock. + let _lock = self.sync_lock.lock().await; + + let self_ = self.clone(); + tokio::task::spawn_blocking(move || { + // Check the next block. + self_.ledger.check_next_block(&block)?; + // Attempt to advance to the next block. + self_.ledger.advance_to_next_block(&block)?; + + // Sync the height with the block. + self_.storage.sync_height_with_block(block.height()); + // Sync the round with the block. + self_.storage.sync_round_with_block(block.round()); + + Ok(()) + }) + .await? + } + /// Syncs the storage with the given block. pub async fn sync_storage_with_block(&self, block: Block<N>) -> Result<()> { // Acquire the sync lock. - let _lock = self.lock.lock().await; + let _lock = self.sync_lock.lock().await; + // Acquire the latest block responses lock. + let mut latest_block_responses = self.latest_block_responses.lock().await; + + // If this block has already been processed, return early. + if self.ledger.contains_block_height(block.height()) || latest_block_responses.contains_key(&block.height()) { + return Ok(()); + } // If the block authority is a subdag, then sync the batch certificates with the block. if let Authority::Quorum(subdag) = block.authority() { + // Reconstruct the unconfirmed transactions. + let unconfirmed_transactions = cfg_iter!(block.transactions()) + .filter_map(|tx| { + tx.to_unconfirmed_transaction().map(|unconfirmed| (unconfirmed.id(), unconfirmed)).ok() + }) + .collect::<HashMap<_, _>>(); + // Iterate over the certificates. - for certificate in subdag.values().flatten() { - // Sync the batch certificate with the block. - self.storage.sync_certificate_with_block(&block, certificate); - // If a BFT sender was provided, send the certificate to the BFT. - if let Some(bft_sender) = self.bft_sender.get() { - // Await the callback to continue. - if let Err(e) = bft_sender.send_sync_bft(certificate.clone()).await { - bail!("Sync - {e}"); - }; + for certificates in subdag.values().cloned() { + cfg_into_iter!(certificates.clone()).for_each(|certificate| { + // Sync the batch certificate with the block. + self.storage.sync_certificate_with_block(&block, certificate.clone(), &unconfirmed_transactions); + }); + + // Sync the BFT DAG with the certificates. + for certificate in certificates { + // If a BFT sender was provided, send the certificate to the BFT. + if let Some(bft_sender) = self.bft_sender.get() { + // Await the callback to continue. + if let Err(e) = bft_sender.send_sync_bft(certificate).await { + bail!("Sync - {e}"); + }; + } } } } - // Sync the height with the block. - self.storage.sync_height_with_block(block.height()); - // Sync the round with the block. - self.storage.sync_round_with_block(block.round()); + // Fetch the latest block height. + let latest_block_height = self.ledger.latest_block_height(); + + // Insert the latest block response. + latest_block_responses.insert(block.height(), block); + // Clear the latest block responses of older blocks. + latest_block_responses.retain(|height, _| *height > latest_block_height); + + // Get a list of contiguous blocks from the latest block responses. + let contiguous_blocks: Vec<Block<N>> = (latest_block_height.saturating_add(1)..)
+ .take_while(|&k| latest_block_responses.contains_key(&k)) + .filter_map(|k| latest_block_responses.get(&k).cloned()) + .collect(); + + // Check if the block response is ready to be added to the ledger. + // Ensure that the previous block's leader certificate meets the availability threshold + // based on the certificates in the current block. + // If the availability threshold is not met, process the next block and check if it is linked to the current block. + // Note: We do not advance to the most recent block response because we would be unable to + // validate if the leader certificate in the block has been certified properly. + for next_block in contiguous_blocks.into_iter() { + // Retrieve the height of the next block. + let next_block_height = next_block.height(); + + // Fetch the leader certificate and the relevant rounds. + let leader_certificate = match next_block.authority() { + Authority::Quorum(subdag) => subdag.leader_certificate().clone(), + _ => bail!("Received a block with an unexpected authority type."), + }; + let commit_round = leader_certificate.round(); + let certificate_round = commit_round.saturating_add(1); + + // Get the committee lookback for the commit round. + let committee_lookback = self.ledger.get_committee_lookback_for_round(commit_round)?; + // Retrieve all of the certificates for the **certificate** round. + let certificates = self.storage.get_certificates_for_round(certificate_round); + // Construct a set over the authors who included the leader's certificate in the certificate round. + let authors = certificates + .iter() + .filter_map(|c| match c.previous_certificate_ids().contains(&leader_certificate.id()) { + true => Some(c.author()), + false => None, + }) + .collect(); + + debug!("Validating sync block {next_block_height} at round {commit_round}..."); + // Check if the leader is ready to be committed. + if committee_lookback.is_availability_threshold_reached(&authors) { + // Initialize the current certificate. + let mut current_certificate = leader_certificate; + // Check if there are any linked blocks that need to be added. + let mut blocks_to_add = vec![next_block]; + + // Check if there are other blocks to process based on `is_linked`. + for height in (self.ledger.latest_block_height().saturating_add(1)..next_block_height).rev() { + // Retrieve the previous block. + let Some(previous_block) = latest_block_responses.get(&height) else { + bail!("Block {height} is missing from the latest block responses."); + }; + // Retrieve the previous certificate. + let previous_certificate = match previous_block.authority() { + Authority::Quorum(subdag) => subdag.leader_certificate().clone(), + _ => bail!("Received a block with an unexpected authority type."), + }; + // Determine if there is a path between the previous certificate and the current certificate. + if self.is_linked(previous_certificate.clone(), current_certificate.clone())? { + debug!("Previous sync block {height} is linked to the current block {next_block_height}"); + // Add the previous leader certificate to the list of certificates to commit. + blocks_to_add.insert(0, previous_block.clone()); + // Update the current certificate to the previous leader certificate. + current_certificate = previous_certificate; + } + } + + // Add the blocks to the ledger. + for block in blocks_to_add { + // Check that the blocks are sequential and can be added to the ledger. 
+ let block_height = block.height(); + if block_height != self.ledger.latest_block_height().saturating_add(1) { + warn!("Skipping block {block_height} from the latest block responses - not sequential."); + continue; + } + + let self_ = self.clone(); + tokio::task::spawn_blocking(move || { + // Check the next block. + self_.ledger.check_next_block(&block)?; + // Attempt to advance to the next block. + self_.ledger.advance_to_next_block(&block)?; + + // Sync the height with the block. + self_.storage.sync_height_with_block(block.height()); + // Sync the round with the block. + self_.storage.sync_round_with_block(block.round()); + + Ok::<(), anyhow::Error>(()) + }) + .await??; + // Remove the block height from the latest block responses. + latest_block_responses.remove(&block_height); + } + } else { + debug!( + "Availability threshold was not reached for block {next_block_height} at round {commit_round}. Checking next block..." + ); + } + } Ok(()) } + + /// Returns `true` if there is a path from the previous certificate to the current certificate. + fn is_linked( + &self, + previous_certificate: BatchCertificate<N>, + current_certificate: BatchCertificate<N>, + ) -> Result<bool> { + // Initialize the list containing the traversal. + let mut traversal = vec![current_certificate.clone()]; + // Iterate over the rounds from the current certificate to the previous certificate. + for round in (previous_certificate.round()..current_certificate.round()).rev() { + // Retrieve all of the certificates for this past round. + let certificates = self.storage.get_certificates_for_round(round); + // Filter the certificates to only include those that are in the traversal. + traversal = certificates + .into_iter() + .filter(|p| traversal.iter().any(|c| c.previous_certificate_ids().contains(&p.id()))) + .collect(); + } + Ok(traversal.contains(&previous_certificate)) + } } // Methods to assist with the block sync module. @@ -312,6 +543,11 @@ impl Sync { self.block_sync.is_block_synced() } + /// Returns the number of blocks the node is behind the greatest peer height. + pub fn num_blocks_behind(&self) -> u32 { + self.block_sync.num_blocks_behind() + } + /// Returns `true` if the node is in gateway mode. pub const fn is_gateway_mode(&self) -> bool { self.block_sync.mode().is_gateway() @@ -333,19 +569,34 @@ impl Sync { ) -> Result<BatchCertificate<N>> { // Initialize a oneshot channel. let (callback_sender, callback_receiver) = oneshot::channel(); + // Determine how many sent requests are pending. + let num_sent_requests = self.pending.num_sent_requests(certificate_id); + // Determine the maximum number of redundant requests. + let num_redundant_requests = max_redundant_requests(self.ledger.clone(), self.storage.current_round()); + // Determine if we should send a certificate request to the peer. + let should_send_request = num_sent_requests < num_redundant_requests; + + // Insert the certificate ID into the pending queue. - if self.pending.insert(certificate_id, peer_ip, Some(callback_sender)) { + self.pending.insert(certificate_id, peer_ip, Some((callback_sender, should_send_request))); + + // If the number of sent requests is less than the maximum number of redundant requests, send the certificate request to the peer. + if should_send_request { // Send the certificate request to the peer.
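+ // Capping the number of in-flight requests per certificate bounds the bandwidth spent on any one certificate while still tolerating a few unresponsive peers.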
if self.gateway.send(peer_ip, Event::CertificateRequest(certificate_id.into())).await.is_none() { bail!("Unable to fetch batch certificate {certificate_id} - failed to send request") } + } else { + debug!( + "Skipped sending request for certificate {} to '{peer_ip}' ({num_sent_requests} redundant requests)", + fmt_id(certificate_id) + ); } // Wait for the certificate to be fetched. - match tokio::time::timeout(core::time::Duration::from_millis(MAX_BATCH_DELAY_IN_MS), callback_receiver).await { + match tokio::time::timeout(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS), callback_receiver).await { // If the certificate was fetched, return it. Ok(result) => Ok(result?), // If the certificate was not fetched, return an error. - Err(e) => bail!("Unable to fetch batch certificate {certificate_id} - (timeout) {e}"), + Err(e) => bail!("Unable to fetch certificate {} - (timeout) {e}", fmt_id(certificate_id)), } } @@ -366,7 +617,7 @@ impl Sync { fn finish_certificate_request(&self, peer_ip: SocketAddr, response: CertificateResponse) { let certificate = response.certificate; // Check if the peer IP exists in the pending queue for the given certificate ID. - let exists = self.pending.get(certificate.id()).unwrap_or_default().contains(&peer_ip); + let exists = self.pending.get_peers(certificate.id()).unwrap_or_default().contains(&peer_ip); // If the peer IP exists, finish the pending request. if exists { // TODO: Validate the certificate. @@ -385,9 +636,425 @@ impl Sync { /// Shuts down the sync module. pub async fn shut_down(&self) { info!("Shutting down the sync module..."); + // Acquire the response lock. + let _lock = self.response_lock.lock().await; // Acquire the sync lock. - let _lock = self.lock.lock().await; + let _lock = self.sync_lock.lock().await; // Abort the tasks. self.handles.lock().iter().for_each(|handle| handle.abort()); } } +#[cfg(test)] +mod tests { + use super::*; + + use crate::{helpers::now, ledger_service::CoreLedgerService, storage_service::BFTMemoryService}; + use snarkos_account::Account; + use snarkvm::{ + console::{ + account::{Address, PrivateKey}, + network::MainnetV0, + }, + ledger::{ + narwhal::{BatchCertificate, BatchHeader, Subdag}, + store::{helpers::memory::ConsensusMemory, ConsensusStore}, + }, + prelude::{Ledger, VM}, + utilities::TestRng, + }; + + use aleo_std::StorageMode; + use indexmap::IndexSet; + use rand::Rng; + use std::collections::BTreeMap; + + type CurrentNetwork = MainnetV0; + type CurrentLedger = Ledger<CurrentNetwork, ConsensusMemory<CurrentNetwork>>; + type CurrentConsensusStore = ConsensusStore<CurrentNetwork, ConsensusMemory<CurrentNetwork>>; + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_commit_via_is_linked() -> anyhow::Result<()> { + let rng = &mut TestRng::default(); + // Initialize the round parameters. + let max_gc_rounds = BatchHeader::<CurrentNetwork>::MAX_GC_ROUNDS as u64; + let commit_round = 2; + + // Initialize the store. + let store = CurrentConsensusStore::open(None).unwrap(); + let account: Account<CurrentNetwork> = Account::new(rng)?; + + // Create a genesis block with a seeded RNG to reproduce the same genesis private keys. + let seed: u64 = rng.gen(); + let genesis_rng = &mut TestRng::from_seed(seed); + let genesis = VM::from(store).unwrap().genesis_beacon(account.private_key(), genesis_rng).unwrap(); + + // Extract the private keys from the genesis committee by using the same RNG to sample private keys.
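+ // Note: re-seeding with the same seed replays the RNG stream, so the private keys sampled below match the genesis committee members.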
+ let genesis_rng = &mut TestRng::from_seed(seed); + let private_keys = [ + *account.private_key(), + PrivateKey::new(genesis_rng)?, + PrivateKey::new(genesis_rng)?, + PrivateKey::new(genesis_rng)?, + ]; + + // Initialize the ledger with the genesis block. + let ledger = CurrentLedger::load(genesis.clone(), StorageMode::Production).unwrap(); + // Initialize the ledger. + let core_ledger = Arc::new(CoreLedgerService::new(ledger.clone(), Default::default())); + + // Sample 5 rounds of batch certificates starting at the genesis round from a static set of 4 authors. + let (round_to_certificates_map, committee) = { + let addresses = vec![ + Address::try_from(private_keys[0])?, + Address::try_from(private_keys[1])?, + Address::try_from(private_keys[2])?, + Address::try_from(private_keys[3])?, + ]; + + let committee = ledger.latest_committee().unwrap(); + + // Initialize a mapping from the round number to the set of batch certificates in the round. + let mut round_to_certificates_map: HashMap>> = + HashMap::new(); + let mut previous_certificates: IndexSet> = IndexSet::with_capacity(4); + + for round in 0..=commit_round + 8 { + let mut current_certificates = IndexSet::new(); + let previous_certificate_ids: IndexSet<_> = if round == 0 || round == 1 { + IndexSet::new() + } else { + previous_certificates.iter().map(|c| c.id()).collect() + }; + let committee_id = committee.id(); + + // Create a certificate for the leader. + if round <= 5 { + let leader = committee.get_leader(round).unwrap(); + let i = addresses.iter().position(|&address| address == leader).unwrap(); + let batch_header = BatchHeader::new( + &private_keys[i], + round, + now(), + committee_id, + Default::default(), + previous_certificate_ids.clone(), + rng, + ) + .unwrap(); + // Sign the batch header. + let mut signatures = IndexSet::with_capacity(4); + for (j, private_key_2) in private_keys.iter().enumerate() { + if i != j { + signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap()); + } + } + current_certificates.insert(BatchCertificate::from(batch_header, signatures).unwrap()); + } + + // Create a certificate for each validator. + if round > 5 { + for (i, private_key_1) in private_keys.iter().enumerate() { + let batch_header = BatchHeader::new( + private_key_1, + round, + now(), + committee_id, + Default::default(), + previous_certificate_ids.clone(), + rng, + ) + .unwrap(); + // Sign the batch header. + let mut signatures = IndexSet::with_capacity(4); + for (j, private_key_2) in private_keys.iter().enumerate() { + if i != j { + signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap()); + } + } + current_certificates.insert(BatchCertificate::from(batch_header, signatures).unwrap()); + } + } + // Update the map of certificates. + round_to_certificates_map.insert(round, current_certificates.clone()); + previous_certificates = current_certificates.clone(); + } + (round_to_certificates_map, committee) + }; + + // Initialize the storage. + let storage = Storage::new(core_ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Insert certificates into storage. + let mut certificates: Vec> = Vec::new(); + for i in 1..=commit_round + 8 { + let c = (*round_to_certificates_map.get(&i).unwrap()).clone(); + certificates.extend(c); + } + for certificate in certificates.clone().iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + + // Create block 1. 
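+ // Blocks 1, 2, and 3 commit the leaders of rounds 2, 4, and 6; each subdag holds the leader certificate plus all certificates from the preceding round.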
+ let leader_round_1 = commit_round; + let leader_1 = committee.get_leader(leader_round_1).unwrap(); + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader_1).unwrap(); + let block_1 = { + let mut subdag_map: BTreeMap>> = BTreeMap::new(); + let mut leader_cert_map = IndexSet::new(); + leader_cert_map.insert(leader_certificate.clone()); + let mut previous_cert_map = IndexSet::new(); + for cert in storage.get_certificates_for_round(commit_round - 1) { + previous_cert_map.insert(cert); + } + subdag_map.insert(commit_round, leader_cert_map.clone()); + subdag_map.insert(commit_round - 1, previous_cert_map.clone()); + let subdag = Subdag::from(subdag_map.clone())?; + core_ledger.prepare_advance_to_next_quorum_block(subdag, Default::default())? + }; + // Insert block 1. + core_ledger.advance_to_next_block(&block_1)?; + + // Create block 2. + let leader_round_2 = commit_round + 2; + let leader_2 = committee.get_leader(leader_round_2).unwrap(); + let leader_certificate_2 = storage.get_certificate_for_round_with_author(leader_round_2, leader_2).unwrap(); + let block_2 = { + let mut subdag_map_2: BTreeMap>> = BTreeMap::new(); + let mut leader_cert_map_2 = IndexSet::new(); + leader_cert_map_2.insert(leader_certificate_2.clone()); + let mut previous_cert_map_2 = IndexSet::new(); + for cert in storage.get_certificates_for_round(leader_round_2 - 1) { + previous_cert_map_2.insert(cert); + } + subdag_map_2.insert(leader_round_2, leader_cert_map_2.clone()); + subdag_map_2.insert(leader_round_2 - 1, previous_cert_map_2.clone()); + let subdag_2 = Subdag::from(subdag_map_2.clone())?; + core_ledger.prepare_advance_to_next_quorum_block(subdag_2, Default::default())? + }; + // Insert block 2. + core_ledger.advance_to_next_block(&block_2)?; + + // Create block 3 + let leader_round_3 = commit_round + 4; + let leader_3 = committee.get_leader(leader_round_3).unwrap(); + let leader_certificate_3 = storage.get_certificate_for_round_with_author(leader_round_3, leader_3).unwrap(); + let block_3 = { + let mut subdag_map_3: BTreeMap>> = BTreeMap::new(); + let mut leader_cert_map_3 = IndexSet::new(); + leader_cert_map_3.insert(leader_certificate_3.clone()); + let mut previous_cert_map_3 = IndexSet::new(); + for cert in storage.get_certificates_for_round(leader_round_3 - 1) { + previous_cert_map_3.insert(cert); + } + subdag_map_3.insert(leader_round_3, leader_cert_map_3.clone()); + subdag_map_3.insert(leader_round_3 - 1, previous_cert_map_3.clone()); + let subdag_3 = Subdag::from(subdag_map_3.clone())?; + core_ledger.prepare_advance_to_next_quorum_block(subdag_3, Default::default())? + }; + // Insert block 3. + core_ledger.advance_to_next_block(&block_3)?; + + // Initialize the syncing ledger. + let syncing_ledger = Arc::new(CoreLedgerService::new( + CurrentLedger::load(genesis, StorageMode::Production).unwrap(), + Default::default(), + )); + // Initialize the gateway. + let gateway = Gateway::new(account.clone(), storage.clone(), syncing_ledger.clone(), None, &[], None)?; + // Initialize the sync module. + let sync = Sync::new(gateway.clone(), storage.clone(), syncing_ledger.clone()); + // Try to sync block 1. + sync.sync_storage_with_block(block_1).await?; + // Ensure that the sync ledger has not advanced. + assert_eq!(syncing_ledger.latest_block_height(), 0); + // Try to sync block 2. + sync.sync_storage_with_block(block_2).await?; + // Ensure that the sync ledger has not advanced. + assert_eq!(syncing_ledger.latest_block_height(), 0); + // Try to sync block 3. 
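+ // Rounds above 5 contain certificates from all four validators, so block 3's leader reaches the availability threshold and the `is_linked` walk should commit blocks 1 and 2 along with it.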
+ sync.sync_storage_with_block(block_3).await?; + // Ensure blocks 1 and 2 were added to the ledger. + assert!(syncing_ledger.contains_block_height(1)); + assert!(syncing_ledger.contains_block_height(2)); + + Ok(()) + } + + #[tokio::test] + #[tracing_test::traced_test] + async fn test_pending_certificates() -> anyhow::Result<()> { + let rng = &mut TestRng::default(); + // Initialize the round parameters. + let max_gc_rounds = BatchHeader::::MAX_GC_ROUNDS as u64; + let commit_round = 2; + + // Initialize the store. + let store = CurrentConsensusStore::open(None).unwrap(); + let account: Account = Account::new(rng)?; + + // Create a genesis block with a seeded RNG to reproduce the same genesis private keys. + let seed: u64 = rng.gen(); + let genesis_rng = &mut TestRng::from_seed(seed); + let genesis = VM::from(store).unwrap().genesis_beacon(account.private_key(), genesis_rng).unwrap(); + + // Extract the private keys from the genesis committee by using the same RNG to sample private keys. + let genesis_rng = &mut TestRng::from_seed(seed); + let private_keys = [ + *account.private_key(), + PrivateKey::new(genesis_rng)?, + PrivateKey::new(genesis_rng)?, + PrivateKey::new(genesis_rng)?, + ]; + // Initialize the ledger with the genesis block. + let ledger = CurrentLedger::load(genesis.clone(), StorageMode::Production).unwrap(); + // Initialize the ledger. + let core_ledger = Arc::new(CoreLedgerService::new(ledger.clone(), Default::default())); + // Sample rounds of batch certificates starting at the genesis round from a static set of 4 authors. + let (round_to_certificates_map, committee) = { + // Initialize the committee. + let committee = ledger.latest_committee().unwrap(); + // Initialize a mapping from the round number to the set of batch certificates in the round. + let mut round_to_certificates_map: HashMap>> = + HashMap::new(); + let mut previous_certificates: IndexSet> = IndexSet::with_capacity(4); + + for round in 0..=commit_round + 8 { + let mut current_certificates = IndexSet::new(); + let previous_certificate_ids: IndexSet<_> = if round == 0 || round == 1 { + IndexSet::new() + } else { + previous_certificates.iter().map(|c| c.id()).collect() + }; + let committee_id = committee.id(); + // Create a certificate for each validator. + for (i, private_key_1) in private_keys.iter().enumerate() { + let batch_header = BatchHeader::new( + private_key_1, + round, + now(), + committee_id, + Default::default(), + previous_certificate_ids.clone(), + rng, + ) + .unwrap(); + // Sign the batch header. + let mut signatures = IndexSet::with_capacity(4); + for (j, private_key_2) in private_keys.iter().enumerate() { + if i != j { + signatures.insert(private_key_2.sign(&[batch_header.batch_id()], rng).unwrap()); + } + } + current_certificates.insert(BatchCertificate::from(batch_header, signatures).unwrap()); + } + + // Update the map of certificates. + round_to_certificates_map.insert(round, current_certificates.clone()); + previous_certificates = current_certificates.clone(); + } + (round_to_certificates_map, committee) + }; + + // Initialize the storage. + let storage = Storage::new(core_ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + // Insert certificates into storage. 
+ let mut certificates: Vec> = Vec::new(); + for i in 1..=commit_round + 8 { + let c = (*round_to_certificates_map.get(&i).unwrap()).clone(); + certificates.extend(c); + } + for certificate in certificates.clone().iter() { + storage.testing_only_insert_certificate_testing_only(certificate.clone()); + } + // Create block 1. + let leader_round_1 = commit_round; + let leader_1 = committee.get_leader(leader_round_1).unwrap(); + let leader_certificate = storage.get_certificate_for_round_with_author(commit_round, leader_1).unwrap(); + let mut subdag_map: BTreeMap>> = BTreeMap::new(); + let block_1 = { + let mut leader_cert_map = IndexSet::new(); + leader_cert_map.insert(leader_certificate.clone()); + let mut previous_cert_map = IndexSet::new(); + for cert in storage.get_certificates_for_round(commit_round - 1) { + previous_cert_map.insert(cert); + } + subdag_map.insert(commit_round, leader_cert_map.clone()); + subdag_map.insert(commit_round - 1, previous_cert_map.clone()); + let subdag = Subdag::from(subdag_map.clone())?; + core_ledger.prepare_advance_to_next_quorum_block(subdag, Default::default())? + }; + // Insert block 1. + core_ledger.advance_to_next_block(&block_1)?; + + // Create block 2. + let leader_round_2 = commit_round + 2; + let leader_2 = committee.get_leader(leader_round_2).unwrap(); + let leader_certificate_2 = storage.get_certificate_for_round_with_author(leader_round_2, leader_2).unwrap(); + let mut subdag_map_2: BTreeMap>> = BTreeMap::new(); + let block_2 = { + let mut leader_cert_map_2 = IndexSet::new(); + leader_cert_map_2.insert(leader_certificate_2.clone()); + let mut previous_cert_map_2 = IndexSet::new(); + for cert in storage.get_certificates_for_round(leader_round_2 - 1) { + previous_cert_map_2.insert(cert); + } + subdag_map_2.insert(leader_round_2, leader_cert_map_2.clone()); + subdag_map_2.insert(leader_round_2 - 1, previous_cert_map_2.clone()); + let subdag_2 = Subdag::from(subdag_map_2.clone())?; + core_ledger.prepare_advance_to_next_quorum_block(subdag_2, Default::default())? + }; + // Insert block 2. + core_ledger.advance_to_next_block(&block_2)?; + + // Create block 3 + let leader_round_3 = commit_round + 4; + let leader_3 = committee.get_leader(leader_round_3).unwrap(); + let leader_certificate_3 = storage.get_certificate_for_round_with_author(leader_round_3, leader_3).unwrap(); + let mut subdag_map_3: BTreeMap>> = BTreeMap::new(); + let block_3 = { + let mut leader_cert_map_3 = IndexSet::new(); + leader_cert_map_3.insert(leader_certificate_3.clone()); + let mut previous_cert_map_3 = IndexSet::new(); + for cert in storage.get_certificates_for_round(leader_round_3 - 1) { + previous_cert_map_3.insert(cert); + } + subdag_map_3.insert(leader_round_3, leader_cert_map_3.clone()); + subdag_map_3.insert(leader_round_3 - 1, previous_cert_map_3.clone()); + let subdag_3 = Subdag::from(subdag_map_3.clone())?; + core_ledger.prepare_advance_to_next_quorum_block(subdag_3, Default::default())? + }; + // Insert block 3. + core_ledger.advance_to_next_block(&block_3)?; + + /* + Check that the pending certificates are computed correctly. + */ + + // Retrieve the pending certificates. + let pending_certificates = storage.get_pending_certificates(); + // Check that all of the pending certificates are not contained in the ledger. + for certificate in pending_certificates.clone() { + assert!(!core_ledger.contains_certificate(&certificate.id()).unwrap_or(false)); + } + // Initialize an empty set to be populated with the committed certificates in the block subdags. 
+ let mut committed_certificates: IndexSet> = IndexSet::new(); + { + let subdag_maps = [&subdag_map, &subdag_map_2, &subdag_map_3]; + for subdag in subdag_maps.iter() { + for subdag_certificates in subdag.values() { + committed_certificates.extend(subdag_certificates.iter().cloned()); + } + } + }; + // Create the set of candidate pending certificates as the set of all certificates minus the set of the committed certificates. + let mut candidate_pending_certificates: IndexSet> = IndexSet::new(); + for certificate in certificates.clone() { + if !committed_certificates.contains(&certificate) { + candidate_pending_certificates.insert(certificate); + } + } + // Check that the set of pending certificates is equal to the set of candidate pending certificates. + assert_eq!(pending_certificates, candidate_pending_certificates); + Ok(()) + } +} diff --git a/node/bft/src/worker.rs b/node/bft/src/worker.rs index 4efe356fc2..dc82d775ce 100644 --- a/node/bft/src/worker.rs +++ b/node/bft/src/worker.rs @@ -14,31 +14,29 @@ use crate::{ events::{Event, TransmissionRequest, TransmissionResponse}, - helpers::{fmt_id, Pending, Ready, Storage, WorkerReceiver}, + helpers::{fmt_id, max_redundant_requests, Pending, Ready, Storage, WorkerReceiver}, + spawn_blocking, ProposedBatch, Transport, - MAX_BATCH_DELAY_IN_MS, - MAX_TRANSMISSIONS_PER_BATCH, - MAX_TRANSMISSIONS_PER_WORKER_PING, + MAX_FETCH_TIMEOUT_IN_MS, MAX_WORKERS, }; use snarkos_node_bft_ledger_service::LedgerService; use snarkvm::{ console::prelude::*, - ledger::narwhal::{Data, Transmission, TransmissionID}, - prelude::{ + ledger::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, + narwhal::{BatchHeader, Data, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, }; use indexmap::{IndexMap, IndexSet}; use parking_lot::Mutex; +use rand::seq::IteratorRandom; use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; use tokio::{sync::oneshot, task::JoinHandle, time::timeout}; -const MAX_TRANSMISSIONS_PER_WORKER: usize = MAX_TRANSMISSIONS_PER_BATCH / MAX_WORKERS as usize; - #[derive(Clone)] pub struct Worker { /// The worker ID. @@ -94,9 +92,22 @@ impl Worker { pub const fn id(&self) -> u8 { self.id } + + /// Returns a reference to the pending transmissions queue. + pub fn pending(&self) -> &Arc, Transmission>> { + &self.pending + } } impl Worker { + /// The maximum number of transmissions allowed in a worker. + pub const MAX_TRANSMISSIONS_PER_WORKER: usize = + BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH / MAX_WORKERS as usize; + /// The maximum number of transmissions allowed in a worker ping. + pub const MAX_TRANSMISSIONS_PER_WORKER_PING: usize = BatchHeader::::MAX_TRANSMISSIONS_PER_BATCH / 10; + + // transmissions + /// Returns the number of transmissions in the ready queue. pub fn num_transmissions(&self) -> usize { self.ready.num_transmissions() @@ -130,7 +141,7 @@ impl Worker { } /// Returns the solutions in the ready queue. - pub fn solutions(&self) -> impl '_ + Iterator, Data>)> { + pub fn solutions(&self) -> impl '_ + Iterator, Data>)> { self.ready.solutions() } @@ -209,8 +220,13 @@ impl Worker { /// Broadcasts a worker ping event. pub(crate) fn broadcast_ping(&self) { // Retrieve the transmission IDs. 
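+ // Sampling randomly (rather than always taking the first entries) spreads each ping over the whole ready queue.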
@@ -209,8 +220,13 @@
     /// Broadcasts a worker ping event.
     pub(crate) fn broadcast_ping(&self) {
         // Retrieve the transmission IDs.
-        let transmission_ids =
-            self.ready.transmission_ids().into_iter().take(MAX_TRANSMISSIONS_PER_WORKER_PING).collect::<IndexSet<_>>();
+        let transmission_ids = self
+            .ready
+            .transmission_ids()
+            .into_iter()
+            .choose_multiple(&mut rand::thread_rng(), Self::MAX_TRANSMISSIONS_PER_WORKER_PING)
+            .into_iter()
+            .collect::<IndexSet<_>>();
 
         // Broadcast the ping event.
         if !transmission_ids.is_empty() {
@@ -228,7 +244,7 @@
         }
         // If the ready queue is full, then skip this transmission.
         // Note: We must prioritize the unconfirmed solutions and unconfirmed transactions, not transmissions.
-        if self.ready.num_transmissions() > MAX_TRANSMISSIONS_PER_WORKER {
+        if self.ready.num_transmissions() > Self::MAX_TRANSMISSIONS_PER_WORKER {
             return;
         }
         // Attempt to fetch the transmission from the peer.
@@ -289,24 +305,22 @@
     /// Note: This method assumes the incoming solution is valid and does not exist in the ledger.
     pub(crate) async fn process_unconfirmed_solution(
         &self,
-        puzzle_commitment: PuzzleCommitment<N>,
-        prover_solution: Data<ProverSolution<N>>,
+        solution_id: SolutionID<N>,
+        solution: Data<Solution<N>>,
     ) -> Result<()> {
         // Construct the transmission.
-        let transmission = Transmission::Solution(prover_solution.clone());
-        // Remove the puzzle commitment from the pending queue.
-        self.pending.remove(puzzle_commitment, Some(transmission.clone()));
+        let transmission = Transmission::Solution(solution.clone());
+        // Remove the solution ID from the pending queue.
+        self.pending.remove(solution_id, Some(transmission.clone()));
         // Check if the solution exists.
-        if self.contains_transmission(puzzle_commitment) {
-            bail!("Solution '{}' already exists.", fmt_id(puzzle_commitment));
+        if self.contains_transmission(solution_id) {
+            bail!("Solution '{}' already exists.", fmt_id(solution_id));
         }
         // Check that the solution is well-formed and unique.
-        if let Err(e) = self.ledger.check_solution_basic(puzzle_commitment, prover_solution).await {
-            bail!("Invalid unconfirmed solution '{}': {e}", fmt_id(puzzle_commitment));
-        }
-        // Adds the prover solution to the ready queue.
-        if self.ready.insert(puzzle_commitment, transmission) {
-            trace!("Worker {} - Added unconfirmed solution '{}'", self.id, fmt_id(puzzle_commitment));
+        self.ledger.check_solution_basic(solution_id, solution).await?;
+        // Adds the solution to the ready queue.
+        if self.ready.insert(solution_id, transmission) {
+            trace!("Worker {} - Added unconfirmed solution '{}'", self.id, fmt_id(solution_id));
         }
         Ok(())
     }
@@ -326,9 +340,7 @@
             bail!("Transaction '{}' already exists.", fmt_id(transaction_id));
         }
         // Check that the transaction is well-formed and unique.
-        if let Err(e) = self.ledger.check_transaction_basic(transaction_id, transaction).await {
-            bail!("Invalid unconfirmed transaction '{}': {e}", fmt_id(transaction_id));
-        }
+        self.ledger.check_transaction_basic(transaction_id, transaction).await?;
         // Adds the transaction to the ready queue.
         if self.ready.insert(&transaction_id, transmission) {
             trace!("Worker {} - Added unconfirmed transaction '{}'", self.id, fmt_id(transaction_id));
@@ -342,6 +354,22 @@
     fn start_handlers(&self, receiver: WorkerReceiver<N>) {
         let WorkerReceiver { mut rx_worker_ping, mut rx_transmission_request, mut rx_transmission_response } = receiver;
 
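Aside: the ping hunk above swaps `.take(n)` for `rand`'s `IteratorRandom::choose_multiple`, so each ping advertises a uniform random sample of the ready queue rather than always re-advertising the oldest entries. A self-contained sketch of the sampling call:

```rust
use rand::seq::IteratorRandom;

fn main() {
    // With take(3) this would always yield [0, 1, 2]; choose_multiple instead
    // returns a uniform random 3-element sample of the iterator.
    let sample: Vec<u32> = (0u32..1_000).choose_multiple(&mut rand::thread_rng(), 3);
    assert_eq!(sample.len(), 3);
}
```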
+        // Start the pending queue expiration loop.
+        let self_ = self.clone();
+        self.spawn(async move {
+            loop {
+                // Sleep briefly.
+                tokio::time::sleep(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS)).await;
+
+                // Remove the expired pending transmission requests.
+                let self__ = self_.clone();
+                let _ = spawn_blocking!({
+                    self__.pending.clear_expired_callbacks();
+                    Ok(())
+                });
+            }
+        });
+
         // Process the ping events.
         let self_ = self.clone();
         self.spawn(async move {
@@ -363,7 +391,11 @@
         self.spawn(async move {
             while let Some((peer_ip, transmission_response)) = rx_transmission_response.recv().await {
                 // Process the transmission response.
-                self_.finish_transmission_request(peer_ip, transmission_response);
+                let self__ = self_.clone();
+                let _ = spawn_blocking!({
+                    self__.finish_transmission_request(peer_ip, transmission_response);
+                    Ok(())
+                });
             }
         });
     }
@@ -376,14 +408,30 @@
     ) -> Result<(TransmissionID<N>, Transmission<N>)> {
         // Initialize a oneshot channel.
         let (callback_sender, callback_receiver) = oneshot::channel();
+        // Determine how many sent requests are pending.
+        let num_sent_requests = self.pending.num_sent_requests(transmission_id);
+        // Determine the maximum number of redundant requests.
+        let num_redundant_requests = max_redundant_requests(self.ledger.clone(), self.storage.current_round());
+        // Determine if we should send a transmission request to the peer.
+        let should_send_request = num_sent_requests < num_redundant_requests;
+
         // Insert the transmission ID into the pending queue.
-        self.pending.insert(transmission_id, peer_ip, Some(callback_sender));
-        // Send the transmission request to the peer.
-        if self.gateway.send(peer_ip, Event::TransmissionRequest(transmission_id.into())).await.is_none() {
-            bail!("Unable to fetch transmission - failed to send request")
+        self.pending.insert(transmission_id, peer_ip, Some((callback_sender, should_send_request)));
+
+        // If the number of sent requests is below the redundancy factor, send the transmission request to the peer.
+        if should_send_request {
+            // Send the transmission request to the peer.
+            if self.gateway.send(peer_ip, Event::TransmissionRequest(transmission_id.into())).await.is_none() {
+                bail!("Unable to fetch transmission - failed to send request")
+            }
+        } else {
+            debug!(
+                "Skipped sending request for transmission {} to '{peer_ip}' ({num_sent_requests} redundant requests)",
+                fmt_id(transmission_id)
+            );
         }
         // Wait for the transmission to be fetched.
-        match timeout(Duration::from_millis(MAX_BATCH_DELAY_IN_MS), callback_receiver).await {
+        match timeout(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS), callback_receiver).await {
             // If the transmission was fetched, return it.
             Ok(result) => Ok((transmission_id, result?)),
             // If the transmission was not fetched, return an error.
@@ -396,11 +444,11 @@
     fn finish_transmission_request(&self, peer_ip: SocketAddr, response: TransmissionResponse<N>) {
         let TransmissionResponse { transmission_id, mut transmission } = response;
         // Check if the peer IP exists in the pending queue for the given transmission ID.
-        let exists = self.pending.get(transmission_id).unwrap_or_default().contains(&peer_ip);
+        let exists = self.pending.get_peers(transmission_id).unwrap_or_default().contains(&peer_ip);
         // If the peer IP exists, finish the pending request.
         if exists {
-            // Ensure the transmission ID matches the transmission.
-            match self.ledger.ensure_transmission_id_matches(transmission_id, &mut transmission) {
+            // Ensure the transmission is not a fee and matches the transmission ID.
+            match self.ledger.ensure_transmission_is_well_formed(transmission_id, &mut transmission) {
                 Ok(()) => {
                     // Remove the transmission ID from the pending queue.
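Aside: this diff does not include the body of `helpers::max_redundant_requests`, but the tests below (and the standalone `components/pending.rs` test) pin its value to 34 at round 0 with a maximum-size committee. That is consistent with an availability-threshold-style bound under a 100-member maximum committee; the sketch below is an inference from the asserted value, not a quote of the implementation:

```rust
// Inferred sketch, not authoritative: request from f + 1 = 1 + n/3 validators,
// so at least one honest responder is expected. Yields 34 when n == 100.
fn max_redundant_requests_sketch(num_validators: usize) -> usize {
    1 + num_validators / 3
}

fn main() {
    assert_eq!(max_redundant_requests_sketch(100), 34);
}
```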
self.pending.remove(transmission_id, Some(transmission)); @@ -439,6 +487,7 @@ impl Worker { #[cfg(test)] mod tests { use super::*; + use crate::helpers::CALLBACK_EXPIRATION_IN_SECS; use snarkos_node_bft_ledger_service::LedgerService; use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ @@ -448,6 +497,7 @@ mod tests { committee::Committee, narwhal::{BatchCertificate, Subdag, Transmission, TransmissionID}, }, + prelude::Address, }; use bytes::Bytes; @@ -455,7 +505,9 @@ mod tests { use mockall::mock; use std::{io, ops::Range}; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; + + const ITERATIONS: usize = 100; mock! { Gateway {} @@ -474,28 +526,31 @@ mod tests { fn latest_round(&self) -> u64; fn latest_block_height(&self) -> u32; fn latest_block(&self) -> Block; + fn latest_leader(&self) -> Option<(u64, Address)>; + fn update_latest_leader(&self, round: u64, leader: Address); fn contains_block_height(&self, height: u32) -> bool; fn get_block_height(&self, hash: &N::BlockHash) -> Result; fn get_block_hash(&self, height: u32) -> Result; + fn get_block_round(&self, height: u32) -> Result; fn get_block(&self, height: u32) -> Result>; fn get_blocks(&self, heights: Range) -> Result>>; - fn get_solution(&self, solution_id: &PuzzleCommitment) -> Result>; + fn get_solution(&self, solution_id: &SolutionID) -> Result>; fn get_unconfirmed_transaction(&self, transaction_id: N::TransactionID) -> Result>; fn get_batch_certificate(&self, certificate_id: &Field) -> Result>; fn current_committee(&self) -> Result>; fn get_committee_for_round(&self, round: u64) -> Result>; - fn get_previous_committee_for_round(&self, round: u64) -> Result>; + fn get_committee_lookback_for_round(&self, round: u64) -> Result>; fn contains_certificate(&self, certificate_id: &Field) -> Result; fn contains_transmission(&self, transmission_id: &TransmissionID) -> Result; - fn ensure_transmission_id_matches( + fn ensure_transmission_is_well_formed( &self, transmission_id: TransmissionID, transmission: &mut Transmission, ) -> Result<()>; async fn check_solution_basic( &self, - puzzle_commitment: PuzzleCommitment, - solution: Data>, + solution_id: SolutionID, + solution: Data>, ) -> Result<()>; async fn check_transaction_basic( &self, @@ -512,15 +567,38 @@ mod tests { } } + #[tokio::test] + async fn test_max_redundant_requests() { + const NUM_NODES: u16 = Committee::::MAX_COMMITTEE_SIZE; + + let rng = &mut TestRng::default(); + // Sample a committee. + let committee = + snarkvm::ledger::committee::test_helpers::sample_committee_for_round_and_size(0, NUM_NODES, rng); + let committee_clone = committee.clone(); + // Setup the mock ledger. + let mut mock_ledger = MockLedger::default(); + mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); + mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); + mock_ledger.expect_check_solution_basic().returning(|_, _| Ok(())); + let ledger: Arc> = Arc::new(mock_ledger); + + // Ensure the maximum number of redundant requests is correct and consistent across iterations. + assert_eq!(max_redundant_requests(ledger, 0), 34, "Update me if the formula changes"); + } + #[tokio::test] async fn test_process_transmission() { let rng = &mut TestRng::default(); // Sample a committee. 
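Aside: `Pending::insert` now takes `Some((callback_sender, should_send_request))` instead of `Some(callback_sender)`, which is what lets `num_sent_requests` and `num_callbacks` diverge in the tests below. A minimal sketch of that bookkeeping, with the stored tuple shape assumed rather than taken from this diff:

```rust
use tokio::sync::oneshot;

// Assumed shape: each pending ID maps to its callbacks, where the bool records
// whether a network request was actually sent for that callback.
type Callbacks = Vec<(oneshot::Sender<()>, bool)>;

fn num_callbacks(callbacks: &Callbacks) -> usize {
    callbacks.len()
}

fn num_sent_requests(callbacks: &Callbacks) -> usize {
    // Only callbacks flagged as "request sent" count against the redundancy cap.
    callbacks.iter().filter(|(_, request_sent)| *request_sent).count()
}
```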
let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let gateway = MockGateway::default(); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); mock_ledger.expect_check_solution_basic().returning(|_, _| Ok(())); let ledger: Arc> = Arc::new(mock_ledger); @@ -530,7 +608,7 @@ mod tests { // Create the Worker. let worker = Worker::new(0, Arc::new(gateway), storage, ledger, Default::default()).unwrap(); let data = |rng: &mut TestRng| Data::Buffer(Bytes::from((0..512).map(|_| rng.gen::()).collect::>())); - let transmission_id = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); + let transmission_id = TransmissionID::Solution(rng.gen::().into()); let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234)); let transmission = Transmission::Solution(data(rng)); @@ -550,6 +628,7 @@ mod tests { let rng = &mut TestRng::default(); // Sample a committee. let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let mut gateway = MockGateway::default(); gateway.expect_send().returning(|_, _| { @@ -558,14 +637,15 @@ mod tests { }); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); - mock_ledger.expect_ensure_transmission_id_matches().returning(|_, _| Ok(())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); + mock_ledger.expect_ensure_transmission_is_well_formed().returning(|_, _| Ok(())); let ledger: Arc> = Arc::new(mock_ledger); // Initialize the storage. let storage = Storage::::new(ledger.clone(), Arc::new(BFTMemoryService::new()), 1); // Create the Worker. let worker = Worker::new(0, Arc::new(gateway), storage, ledger, Default::default()).unwrap(); - let transmission_id = TransmissionID::Solution(PuzzleCommitment::from_g1_affine(rng.gen())); + let transmission_id = TransmissionID::Solution(rng.gen::().into()); let worker_ = worker.clone(); let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234)); let _ = worker_.send_transmission_request(peer_ip, transmission_id).await; @@ -585,6 +665,7 @@ mod tests { let rng = &mut TestRng::default(); // Sample a committee. let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let mut gateway = MockGateway::default(); gateway.expect_send().returning(|_, _| { @@ -593,6 +674,7 @@ mod tests { }); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); mock_ledger.expect_check_solution_basic().returning(|_, _| Ok(())); let ledger: Arc> = Arc::new(mock_ledger); @@ -601,21 +683,21 @@ mod tests { // Create the Worker. 
let worker = Worker::new(0, Arc::new(gateway), storage, ledger, Default::default()).unwrap(); - let puzzle = PuzzleCommitment::from_g1_affine(rng.gen()); - let transmission_id = TransmissionID::Solution(puzzle); + let solution_id = rng.gen::().into(); + let transmission_id = TransmissionID::Solution(solution_id); let worker_ = worker.clone(); let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234)); let _ = worker_.send_transmission_request(peer_ip, transmission_id).await; assert!(worker.pending.contains(transmission_id)); let result = worker .process_unconfirmed_solution( - puzzle, + solution_id, Data::Buffer(Bytes::from((0..512).map(|_| rng.gen::()).collect::>())), ) .await; assert!(result.is_ok()); assert!(!worker.pending.contains(transmission_id)); - assert!(worker.ready.contains(puzzle)); + assert!(worker.ready.contains(solution_id)); } #[tokio::test] @@ -623,6 +705,7 @@ mod tests { let rng = &mut TestRng::default(); // Sample a committee. let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let mut gateway = MockGateway::default(); gateway.expect_send().returning(|_, _| { @@ -631,6 +714,7 @@ mod tests { }); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); mock_ledger.expect_check_solution_basic().returning(|_, _| Err(anyhow!(""))); let ledger: Arc> = Arc::new(mock_ledger); @@ -639,21 +723,21 @@ mod tests { // Create the Worker. let worker = Worker::new(0, Arc::new(gateway), storage, ledger, Default::default()).unwrap(); - let puzzle = PuzzleCommitment::from_g1_affine(rng.gen()); - let transmission_id = TransmissionID::Solution(puzzle); + let solution_id = rng.gen::().into(); + let transmission_id = TransmissionID::Solution(solution_id); let worker_ = worker.clone(); let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234)); let _ = worker_.send_transmission_request(peer_ip, transmission_id).await; assert!(worker.pending.contains(transmission_id)); let result = worker .process_unconfirmed_solution( - puzzle, + solution_id, Data::Buffer(Bytes::from((0..512).map(|_| rng.gen::()).collect::>())), ) .await; assert!(result.is_err()); - assert!(!worker.pending.contains(puzzle)); - assert!(!worker.ready.contains(puzzle)); + assert!(!worker.pending.contains(solution_id)); + assert!(!worker.ready.contains(solution_id)); } #[tokio::test] @@ -661,6 +745,7 @@ mod tests { let mut rng = &mut TestRng::default(); // Sample a committee. let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let mut gateway = MockGateway::default(); gateway.expect_send().returning(|_, _| { @@ -669,6 +754,7 @@ mod tests { }); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); mock_ledger.expect_check_transaction_basic().returning(|_, _| Ok(())); let ledger: Arc> = Arc::new(mock_ledger); @@ -699,6 +785,7 @@ mod tests { let mut rng = &mut TestRng::default(); // Sample a committee. 
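Aside: the recurring `rng.gen::<u64>().into()` pattern in these tests reflects the move from `PuzzleCommitment` (a G1 group element) to `SolutionID` under the new puzzle. A sketch of the pattern, assuming `SolutionID<N>: From<u64>` as those calls suggest:

```rust
use rand::Rng;
use snarkvm::ledger::{narwhal::TransmissionID, puzzle::SolutionID};

type CurrentNetwork = snarkvm::prelude::MainnetV0;

// A solution ID is now a small scalar ID, so tests can fabricate one from a
// random u64 instead of sampling a G1 affine point.
fn sample_transmission_id(rng: &mut impl Rng) -> TransmissionID<CurrentNetwork> {
    TransmissionID::Solution(rng.gen::<u64>().into())
}
```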
let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); // Setup the mock gateway and ledger. let mut gateway = MockGateway::default(); gateway.expect_send().returning(|_, _| { @@ -707,6 +794,7 @@ mod tests { }); let mut mock_ledger = MockLedger::default(); mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); mock_ledger.expect_check_transaction_basic().returning(|_, _| Err(anyhow!(""))); let ledger: Arc> = Arc::new(mock_ledger); @@ -731,6 +819,112 @@ mod tests { assert!(!worker.pending.contains(transmission_id)); assert!(!worker.ready.contains(transmission_id)); } + + #[tokio::test] + async fn test_flood_transmission_requests() { + let mut rng = &mut TestRng::default(); + // Sample a committee. + let committee = snarkvm::ledger::committee::test_helpers::sample_committee(rng); + let committee_clone = committee.clone(); + // Setup the mock gateway and ledger. + let mut gateway = MockGateway::default(); + gateway.expect_send().returning(|_, _| { + let (_tx, rx) = oneshot::channel(); + Some(rx) + }); + let mut mock_ledger = MockLedger::default(); + mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + mock_ledger.expect_get_committee_lookback_for_round().returning(move |_| Ok(committee_clone.clone())); + mock_ledger.expect_contains_transmission().returning(|_| Ok(false)); + mock_ledger.expect_check_transaction_basic().returning(|_, _| Ok(())); + let ledger: Arc> = Arc::new(mock_ledger); + // Initialize the storage. + let storage = Storage::::new(ledger.clone(), Arc::new(BFTMemoryService::new()), 1); + + // Create the Worker. + let worker = Worker::new(0, Arc::new(gateway), storage, ledger, Default::default()).unwrap(); + let transaction_id: ::TransactionID = Field::::rand(&mut rng).into(); + let transmission_id = TransmissionID::Transaction(transaction_id); + let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234)); + + // Determine the number of redundant requests are sent. + let num_redundant_requests = max_redundant_requests(worker.ledger.clone(), worker.storage.current_round()); + let num_flood_requests = num_redundant_requests * 10; + // Flood the pending queue with transmission requests. + for i in 1..=num_flood_requests { + let worker_ = worker.clone(); + tokio::spawn(async move { + let _ = worker_.send_transmission_request(peer_ip, transmission_id).await; + }); + tokio::time::sleep(Duration::from_millis(10)).await; + // Check that the number of sent requests does not exceed the maximum number of redundant requests. + assert!(worker.pending.num_sent_requests(transmission_id) <= num_redundant_requests); + assert_eq!(worker.pending.num_callbacks(transmission_id), i); + } + // Check that the number of sent requests does not exceed the maximum number of redundant requests. + assert_eq!(worker.pending.num_sent_requests(transmission_id), num_redundant_requests); + assert_eq!(worker.pending.num_callbacks(transmission_id), num_flood_requests); + + // Let all the requests expire. + tokio::time::sleep(Duration::from_secs(CALLBACK_EXPIRATION_IN_SECS as u64 + 1)).await; + assert_eq!(worker.pending.num_sent_requests(transmission_id), 0); + assert_eq!(worker.pending.num_callbacks(transmission_id), 0); + + // Flood the pending queue with transmission requests again. 
+ for i in 1..=num_flood_requests { + let worker_ = worker.clone(); + tokio::spawn(async move { + let _ = worker_.send_transmission_request(peer_ip, transmission_id).await; + }); + tokio::time::sleep(Duration::from_millis(10)).await; + assert!(worker.pending.num_sent_requests(transmission_id) <= num_redundant_requests); + assert_eq!(worker.pending.num_callbacks(transmission_id), i); + } + // Check that the number of sent requests does not exceed the maximum number of redundant requests. + assert_eq!(worker.pending.num_sent_requests(transmission_id), num_redundant_requests); + assert_eq!(worker.pending.num_callbacks(transmission_id), num_flood_requests); + + // Check that fulfilling a transmission request clears the pending queue. + let result = worker + .process_unconfirmed_transaction( + transaction_id, + Data::Buffer(Bytes::from((0..512).map(|_| rng.gen::()).collect::>())), + ) + .await; + assert!(result.is_ok()); + assert_eq!(worker.pending.num_sent_requests(transmission_id), 0); + assert_eq!(worker.pending.num_callbacks(transmission_id), 0); + assert!(!worker.pending.contains(transmission_id)); + assert!(worker.ready.contains(transmission_id)); + } + + #[tokio::test] + async fn test_storage_gc_on_initialization() { + let rng = &mut TestRng::default(); + + for _ in 0..ITERATIONS { + // Mock the ledger round. + let max_gc_rounds = rng.gen_range(50..=100); + let latest_ledger_round = rng.gen_range((max_gc_rounds + 1)..1000); + let expected_gc_round = latest_ledger_round - max_gc_rounds; + + // Sample a committee. + let committee = + snarkvm::ledger::committee::test_helpers::sample_committee_for_round(latest_ledger_round, rng); + + // Setup the mock gateway and ledger. + let mut mock_ledger = MockLedger::default(); + mock_ledger.expect_current_committee().returning(move || Ok(committee.clone())); + + let ledger: Arc> = Arc::new(mock_ledger); + // Initialize the storage. + let storage = + Storage::::new(ledger.clone(), Arc::new(BFTMemoryService::new()), max_gc_rounds); + + // Ensure that the storage GC round is correct. + assert_eq!(storage.gc_round(), expected_gc_round); + } + } } #[cfg(test)] @@ -745,7 +939,7 @@ mod prop_tests { use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; // Initializes a new test committee. fn new_test_committee(n: u16) -> Committee { diff --git a/node/bft/storage-service/src/memory.rs b/node/bft/storage-service/src/memory.rs index f91623bd3f..38cd430d68 100644 --- a/node/bft/storage-service/src/memory.rs +++ b/node/bft/storage-service/src/memory.rs @@ -20,7 +20,7 @@ use snarkvm::{ use indexmap::{indexset, map::Entry, IndexMap, IndexSet}; use parking_lot::RwLock; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use tracing::error; /// A BFT in-memory storage service. @@ -28,6 +28,8 @@ use tracing::error; pub struct BFTMemoryService { /// The map of `transmission ID` to `(transmission, certificate IDs)` entries. transmissions: RwLock, (Transmission, IndexSet>)>>, + /// The map of `aborted transmission ID` to `certificate IDs` entries. + aborted_transmission_ids: RwLock, IndexSet>>>, } impl Default for BFTMemoryService { @@ -40,7 +42,7 @@ impl Default for BFTMemoryService { impl BFTMemoryService { /// Initializes a new BFT in-memory storage service. 
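Aside: `test_storage_gc_on_initialization` checks that a freshly opened storage fast-forwards its GC watermark from the ledger instead of starting at zero. The expectation is plain arithmetic, straight from the test's own formula:

```rust
fn main() {
    // gc_round = latest_ledger_round - max_gc_rounds.
    let max_gc_rounds: u64 = 100;
    let latest_ledger_round: u64 = 750;
    let expected_gc_round = latest_ledger_round - max_gc_rounds;
    assert_eq!(expected_gc_round, 650);
}
```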
pub fn new() -> Self { - Self { transmissions: Default::default() } + Self { transmissions: Default::default(), aborted_transmission_ids: Default::default() } } } @@ -49,10 +51,11 @@ impl StorageService for BFTMemoryService { fn contains_transmission(&self, transmission_id: TransmissionID) -> bool { // Check if the transmission ID exists in storage. self.transmissions.read().contains_key(&transmission_id) + || self.aborted_transmission_ids.read().contains_key(&transmission_id) } /// Returns the transmission for the given `transmission ID`. - /// If the transmission ID does not exist in storage, `None` is returned. + /// If the transmission does not exist in storage, `None` is returned. fn get_transmission(&self, transmission_id: TransmissionID) -> Option> { // Get the transmission. self.transmissions.read().get(&transmission_id).map(|(transmission, _)| transmission).cloned() @@ -63,6 +66,7 @@ impl StorageService for BFTMemoryService { &self, batch_header: &BatchHeader, mut transmissions: HashMap, Transmission>, + aborted_transmissions: HashSet>, ) -> Result, Transmission>> { // Initialize a list for the missing transmissions from storage. let mut missing_transmissions = HashMap::new(); @@ -70,14 +74,21 @@ impl StorageService for BFTMemoryService { let known_transmissions = self.transmissions.read(); // Ensure the declared transmission IDs are all present in storage or the given transmissions map. for transmission_id in batch_header.transmission_ids() { - // If the transmission ID does not exist, ensure it was provided by the caller. + // If the transmission ID does not exist, ensure it was provided by the caller or aborted. if !known_transmissions.contains_key(transmission_id) { // Retrieve the transmission. - let Some(transmission) = transmissions.remove(transmission_id) else { - bail!("Failed to provide a transmission"); - }; - // Append the transmission. - missing_transmissions.insert(*transmission_id, transmission); + match transmissions.remove(transmission_id) { + // Append the transmission if it exists. + Some(transmission) => { + missing_transmissions.insert(*transmission_id, transmission); + } + // If the transmission does not exist, check if it was aborted. + None => { + if !aborted_transmissions.contains(transmission_id) { + bail!("Failed to provide a transmission"); + } + } + } } } Ok(missing_transmissions) @@ -88,10 +99,13 @@ impl StorageService for BFTMemoryService { &self, certificate_id: Field, transmission_ids: IndexSet>, + aborted_transmission_ids: HashSet>, mut missing_transmissions: HashMap, Transmission>, ) { // Acquire the transmissions write lock. let mut transmissions = self.transmissions.write(); + // Acquire the aborted transmission IDs write lock. + let mut aborted_transmission_ids_lock = self.aborted_transmission_ids.write(); // Inserts the following: // - Inserts **only the missing** transmissions from storage. // - Inserts the certificate ID into the corresponding set for **all** transmissions. @@ -106,7 +120,9 @@ impl StorageService for BFTMemoryService { Entry::Vacant(vacant_entry) => { // Retrieve the missing transmission. let Some(transmission) = missing_transmissions.remove(&transmission_id) else { - error!("Failed to provide a missing transmission {transmission_id}"); + if !aborted_transmission_ids.contains(&transmission_id) { + error!("Failed to provide a missing transmission {transmission_id}"); + } continue 'outer; }; // Prepare the set of certificate IDs. 
@@ -116,6 +132,23 @@ impl StorageService for BFTMemoryService { } } } + // Inserts the aborted transmission IDs. + for aborted_transmission_id in aborted_transmission_ids { + // Retrieve the transmission entry. + match aborted_transmission_ids_lock.entry(aborted_transmission_id) { + Entry::Occupied(mut occupied_entry) => { + let certificate_ids = occupied_entry.get_mut(); + // Insert the certificate ID into the set. + certificate_ids.insert(certificate_id); + } + Entry::Vacant(vacant_entry) => { + // Prepare the set of certificate IDs. + let certificate_ids = indexset! { certificate_id }; + // Insert the transmission and a new set with the certificate ID. + vacant_entry.insert(certificate_ids); + } + } + } } /// Removes the certificate ID for the transmissions from storage. @@ -124,6 +157,8 @@ impl StorageService for BFTMemoryService { fn remove_transmissions(&self, certificate_id: &Field, transmission_ids: &IndexSet>) { // Acquire the transmissions write lock. let mut transmissions = self.transmissions.write(); + // Acquire the aborted transmission IDs write lock. + let mut aborted_transmission_ids = self.aborted_transmission_ids.write(); // If this is the last certificate ID for the transmission ID, remove the transmission. for transmission_id in transmission_ids { // Remove the certificate ID for the transmission ID, and determine if there are any more certificate IDs. @@ -140,6 +175,20 @@ impl StorageService for BFTMemoryService { } Entry::Vacant(_) => {} } + // Remove the certificate ID for the aborted transmission ID, and determine if there are any more certificate IDs. + match aborted_transmission_ids.entry(*transmission_id) { + Entry::Occupied(mut occupied_entry) => { + let certificate_ids = occupied_entry.get_mut(); + // Remove the certificate ID for the transmission ID. + certificate_ids.swap_remove(certificate_id); + // If there are no more certificate IDs for the transmission ID, remove the transmission. + if certificate_ids.is_empty() { + // Remove the entry for the transmission ID. + occupied_entry.shift_remove(); + } + } + Entry::Vacant(_) => {} + } } } diff --git a/node/bft/storage-service/src/persistent.rs b/node/bft/storage-service/src/persistent.rs index a44b428e18..b18b3771d7 100644 --- a/node/bft/storage-service/src/persistent.rs +++ b/node/bft/storage-service/src/persistent.rs @@ -33,7 +33,10 @@ use snarkvm::{ use aleo_std::StorageMode; use indexmap::{indexset, IndexSet}; -use std::{borrow::Cow, collections::HashMap}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, +}; use tracing::error; /// A BFT persistent storage service. @@ -41,19 +44,37 @@ use tracing::error; pub struct BFTPersistentStorage { /// The map of `transmission ID` to `(transmission, certificate IDs)` entries. transmissions: DataMap, (Transmission, IndexSet>)>, + /// The map of `aborted transmission ID` to `certificate IDs` entries. + aborted_transmission_ids: DataMap, IndexSet>>, } impl BFTPersistentStorage { /// Initializes a new BFT persistent storage service. pub fn open(storage_mode: StorageMode) -> Result { - Ok(Self { transmissions: internal::RocksDB::open_map(N::ID, storage_mode, MapID::BFT(BFTMap::Transmissions))? }) + Ok(Self { + transmissions: internal::RocksDB::open_map(N::ID, storage_mode.clone(), MapID::BFT(BFTMap::Transmissions))?, + aborted_transmission_ids: internal::RocksDB::open_map( + N::ID, + storage_mode, + MapID::BFT(BFTMap::AbortedTransmissionIDs), + )?, + }) } /// Initializes a new BFT persistent storage service. 
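Aside: both storage services track aborted transmission IDs with the same reference-counting discipline as live transmissions: each aborted ID maps to the set of certificate IDs that still reference it, and the entry is dropped once the last referencing certificate is removed. A standalone sketch of the pattern with simplified key types:

```rust
use indexmap::IndexSet;
use std::collections::HashMap;

// Simplified key types; the real maps use TransmissionID<N> and Field<N>.
type AbortedIds = HashMap<u64, IndexSet<u64>>;

fn insert_ref(map: &mut AbortedIds, aborted_id: u64, certificate_id: u64) {
    // Create the certificate-ID set on first use, then record the reference.
    map.entry(aborted_id).or_default().insert(certificate_id);
}

fn remove_ref(map: &mut AbortedIds, aborted_id: u64, certificate_id: u64) {
    if let Some(certificate_ids) = map.get_mut(&aborted_id) {
        certificate_ids.swap_remove(&certificate_id);
        // Drop the entry once no certificate references the aborted ID.
        if certificate_ids.is_empty() {
            map.remove(&aborted_id);
        }
    }
}
```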
#[cfg(any(test, feature = "test"))] pub fn open_testing(temp_dir: std::path::PathBuf, dev: Option) -> Result { Ok(Self { - transmissions: internal::RocksDB::open_map_testing(temp_dir, dev, MapID::BFT(BFTMap::Transmissions))?, + transmissions: internal::RocksDB::open_map_testing( + temp_dir.clone(), + dev, + MapID::BFT(BFTMap::Transmissions), + )?, + aborted_transmission_ids: internal::RocksDB::open_map_testing( + temp_dir, + dev, + MapID::BFT(BFTMap::AbortedTransmissionIDs), + )?, }) } } @@ -62,13 +83,19 @@ impl StorageService for BFTPersistentStorage { /// Returns `true` if the storage contains the specified `transmission ID`. fn contains_transmission(&self, transmission_id: TransmissionID) -> bool { // Check if the transmission ID exists in storage. - let result = self.transmissions.contains_key_confirmed(&transmission_id); - // If the result is an error, log the error. - if let Err(error) = &result { - error!("Failed to check if transmission ID exists in storage - {error}"); + match self.transmissions.contains_key_confirmed(&transmission_id) { + Ok(true) => return true, + Ok(false) => (), + Err(error) => error!("Failed to check if transmission ID exists in confirmed storage - {error}"), + } + // Check if the transmission ID is in aborted storage. + match self.aborted_transmission_ids.contains_key_confirmed(&transmission_id) { + Ok(result) => result, + Err(error) => { + error!("Failed to check if aborted transmission ID exists in storage - {error}"); + false + } } - // Return the result. - result.unwrap_or(false) } /// Returns the transmission for the given `transmission ID`. @@ -91,19 +118,27 @@ impl StorageService for BFTPersistentStorage { &self, batch_header: &BatchHeader, mut transmissions: HashMap, Transmission>, + aborted_transmissions: HashSet>, ) -> Result, Transmission>> { // Initialize a list for the missing transmissions from storage. let mut missing_transmissions = HashMap::new(); // Ensure the declared transmission IDs are all present in storage or the given transmissions map. for transmission_id in batch_header.transmission_ids() { - // If the transmission ID does not exist, ensure it was provided by the caller. + // If the transmission ID does not exist, ensure it was provided by the caller or aborted. if !self.contains_transmission(*transmission_id) { // Retrieve the transmission. - let Some(transmission) = transmissions.remove(transmission_id) else { - bail!("Failed to provide a transmission"); - }; - // Append the transmission. - missing_transmissions.insert(*transmission_id, transmission); + match transmissions.remove(transmission_id) { + // Append the transmission if it exists. + Some(transmission) => { + missing_transmissions.insert(*transmission_id, transmission); + } + // If the transmission does not exist, check if it was aborted. + None => { + if !aborted_transmissions.contains(transmission_id) { + bail!("Failed to provide a transmission"); + } + } + } } } Ok(missing_transmissions) @@ -114,6 +149,7 @@ impl StorageService for BFTPersistentStorage { &self, certificate_id: Field, transmission_ids: IndexSet>, + aborted_transmission_ids: HashSet>, mut missing_transmissions: HashMap, Transmission>, ) { // Inserts the following: @@ -135,7 +171,9 @@ impl StorageService for BFTPersistentStorage { Ok(None) => { // Retrieve the missing transmission. 
let Some(transmission) = missing_transmissions.remove(&transmission_id) else { - error!("Failed to provide a missing transmission {transmission_id}"); + if !aborted_transmission_ids.contains(&transmission_id) { + error!("Failed to provide a missing transmission {transmission_id}"); + } continue 'outer; }; // Prepare the set of certificate IDs. @@ -152,6 +190,34 @@ impl StorageService for BFTPersistentStorage { } } } + // Inserts the aborted transmission IDs. + for aborted_transmission_id in aborted_transmission_ids { + // Retrieve the transmission entry. + match self.aborted_transmission_ids.get_confirmed(&aborted_transmission_id) { + Ok(Some(entry)) => { + let mut certificate_ids = cow_to_cloned!(entry); + // Insert the certificate ID into the set. + certificate_ids.insert(certificate_id); + // Update the transmission entry. + if let Err(e) = self.aborted_transmission_ids.insert(aborted_transmission_id, certificate_ids) { + error!("Failed to insert aborted transmission ID {aborted_transmission_id} into storage - {e}"); + } + } + Ok(None) => { + // Prepare the set of certificate IDs. + let certificate_ids = indexset! { certificate_id }; + // Insert the transmission and a new set with the certificate ID. + if let Err(e) = self.aborted_transmission_ids.insert(aborted_transmission_id, certificate_ids) { + error!("Failed to insert aborted transmission ID {aborted_transmission_id} into storage - {e}"); + } + } + Err(e) => { + error!( + "Failed to process the 'insert' for aborted transmission ID {aborted_transmission_id} into storage - {e}" + ); + } + } + } } /// Removes the certificate ID for the transmissions from storage. @@ -159,7 +225,7 @@ impl StorageService for BFTPersistentStorage { /// If the transmission no longer references any certificate IDs, the entry is removed from storage. fn remove_transmissions(&self, certificate_id: &Field, transmission_ids: &IndexSet>) { // If this is the last certificate ID for the transmission ID, remove the transmission. - 'outer: for transmission_id in transmission_ids { + for transmission_id in transmission_ids { // Retrieve the transmission entry. match self.transmissions.get_confirmed(transmission_id) { Ok(Some(entry)) => { @@ -171,7 +237,6 @@ impl StorageService for BFTPersistentStorage { // Remove the transmission entry. if let Err(e) = self.transmissions.remove(transmission_id) { error!("Failed to remove transmission {transmission_id} (now empty) from storage - {e}"); - continue 'outer; } } // Otherwise, update the transmission entry. @@ -181,14 +246,44 @@ impl StorageService for BFTPersistentStorage { error!( "Failed to remove transmission {transmission_id} for certificate {certificate_id} from storage - {e}" ); - continue 'outer; } } } Ok(None) => { /* no-op */ } Err(e) => { error!("Failed to process the 'remove' for transmission {transmission_id} from storage - {e}"); - continue 'outer; + } + } + // Retrieve the aborted transmission ID entry. + match self.aborted_transmission_ids.get_confirmed(transmission_id) { + Ok(Some(entry)) => { + let mut certificate_ids = cow_to_cloned!(entry); + // Insert the certificate ID into the set. + certificate_ids.swap_remove(certificate_id); + // If there are no more certificate IDs for the transmission ID, remove the transmission. + if certificate_ids.is_empty() { + // Remove the transmission entry. 
+ if let Err(e) = self.aborted_transmission_ids.remove(transmission_id) { + error!( + "Failed to remove aborted transmission ID {transmission_id} (now empty) from storage - {e}" + ); + } + } + // Otherwise, update the transmission entry. + else { + // Update the transmission entry. + if let Err(e) = self.aborted_transmission_ids.insert(*transmission_id, certificate_ids) { + error!( + "Failed to remove aborted transmission ID {transmission_id} for certificate {certificate_id} from storage - {e}" + ); + } + } + } + Ok(None) => { /* no-op */ } + Err(e) => { + error!( + "Failed to process the 'remove' for aborted transmission ID {transmission_id} from storage - {e}" + ); } } } diff --git a/node/bft/storage-service/src/traits.rs b/node/bft/storage-service/src/traits.rs index 8770c770fb..34f088d546 100644 --- a/node/bft/storage-service/src/traits.rs +++ b/node/bft/storage-service/src/traits.rs @@ -18,7 +18,10 @@ use snarkvm::{ }; use indexmap::IndexSet; -use std::{collections::HashMap, fmt::Debug}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, +}; pub trait StorageService: Debug + Send + Sync { /// Returns `true` if the storage contains the specified `transmission ID`. @@ -33,6 +36,7 @@ pub trait StorageService: Debug + Send + Sync { &self, batch_header: &BatchHeader, transmissions: HashMap, Transmission>, + aborted_transmissions: HashSet>, ) -> Result, Transmission>>; /// Inserts the given certificate ID for each of the transmission IDs, using the missing transmissions map, into storage. @@ -40,6 +44,7 @@ pub trait StorageService: Debug + Send + Sync { &self, certificate_id: Field, transmission_ids: IndexSet>, + aborted_transmission_ids: HashSet>, missing_transmissions: HashMap, Transmission>, ); diff --git a/node/bft/tests/bft_e2e.rs b/node/bft/tests/bft_e2e.rs index 796cd50d4e..22f2be82da 100644 --- a/node/bft/tests/bft_e2e.rs +++ b/node/bft/tests/bft_e2e.rs @@ -14,11 +14,13 @@ #[allow(dead_code)] mod common; +#[allow(dead_code)] +mod components; use crate::common::primary::{TestNetwork, TestNetworkConfig}; use deadline::deadline; use itertools::Itertools; -use snarkos_node_bft::MAX_BATCH_DELAY_IN_MS; +use snarkos_node_bft::MAX_FETCH_TIMEOUT_IN_MS; use std::time::Duration; use tokio::time::sleep; @@ -114,7 +116,7 @@ async fn test_quorum_threshold() { // Start the cannons for node 0. network.fire_transmissions_at(0, TRANSMISSION_INTERVAL_MS); - sleep(Duration::from_millis(MAX_BATCH_DELAY_IN_MS * 2)).await; + sleep(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS)).await; // Check each node is still at round 1. for validator in network.validators.values() { @@ -125,7 +127,7 @@ async fn test_quorum_threshold() { network.connect_validators(0, 1).await; network.fire_transmissions_at(1, TRANSMISSION_INTERVAL_MS); - sleep(Duration::from_millis(MAX_BATCH_DELAY_IN_MS * 2)).await; + sleep(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS)).await; // Check each node is still at round 1. for validator in network.validators.values() { diff --git a/node/bft/tests/common/mod.rs b/node/bft/tests/common/mod.rs index e81ccc3d74..febf3dc3a5 100644 --- a/node/bft/tests/common/mod.rs +++ b/node/bft/tests/common/mod.rs @@ -13,8 +13,9 @@ // limitations under the License. 
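Aside: the `StorageService` trait now threads the aborted set through both methods, so a batch that legitimately aborted a transmission is no longer misreported as missing one. A usage sketch of a hypothetical call site; the `aborted_transmission_ids()` accessor on the batch header is an assumption, not shown in this diff:

```rust
// Hypothetical call site (names assumed where noted).
let aborted: HashSet<TransmissionID<N>> =
    batch_header.aborted_transmission_ids().iter().copied().collect(); // accessor assumed
// A declared ID must now be in storage, in `transmissions`, or in `aborted`.
let missing = storage_service.find_missing_transmissions(&batch_header, transmissions, aborted.clone())?;
storage_service.insert_transmissions(certificate_id, transmission_ids, aborted, missing);
```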
pub mod primary; +pub mod test_peer; pub mod utils; -pub type CurrentNetwork = snarkvm::prelude::Testnet3; +pub type CurrentNetwork = snarkvm::prelude::MainnetV0; pub type TranslucentLedgerService = snarkos_node_bft_ledger_service::TranslucentLedgerService; diff --git a/node/bft/tests/common/primary.rs b/node/bft/tests/common/primary.rs index c24848eb24..abef3f015e 100644 --- a/node/bft/tests/common/primary.rs +++ b/node/bft/tests/common/primary.rs @@ -23,29 +23,22 @@ use snarkos_node_bft::{ Primary, BFT, MAX_BATCH_DELAY_IN_MS, - MAX_GC_ROUNDS, }; use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ - console::algorithms::BHP256, + console::{ + account::{Address, PrivateKey}, + algorithms::{Hash, BHP256}, + network::Network, + }, ledger::{ block::Block, committee::{Committee, MIN_VALIDATOR_STAKE}, - Ledger, - }, - prelude::{ + narwhal::BatchHeader, store::{helpers::memory::ConsensusMemory, ConsensusStore}, - Address, - CryptoRng, - FromBytes, - Hash, - PrivateKey, - Rng, - TestRng, - ToBits, - ToBytes, - VM, + Ledger, }, + prelude::{CryptoRng, FromBytes, Rng, TestRng, ToBits, ToBytes, VM}, utilities::to_bytes_le, }; @@ -134,14 +127,22 @@ impl TestValidator { impl TestNetwork { // Creates a new test network with the given configuration. pub fn new(config: TestNetworkConfig) -> Self { + let mut rng = TestRng::default(); + if let Some(log_level) = config.log_level { initialize_logger(log_level); } - let (accounts, committee) = new_test_committee(config.num_nodes); + let (accounts, committee) = new_test_committee(config.num_nodes, &mut rng); + let bonded_balances: IndexMap<_, _> = committee + .members() + .iter() + .map(|(address, (amount, _))| (*address, (*address, *address, *amount))) + .collect(); let gen_key = *accounts[0].private_key(); - let public_balance_per_validator = - (1_500_000_000_000_000 - (config.num_nodes as u64) * 1_000_000_000_000) / (config.num_nodes as u64); + let public_balance_per_validator = (CurrentNetwork::STARTING_SUPPLY + - (config.num_nodes as u64) * MIN_VALIDATOR_STAKE) + / (config.num_nodes as u64); let mut balances = IndexMap::, u64>::new(); for account in accounts.iter() { balances.insert(account.address(), public_balance_per_validator); @@ -149,10 +150,14 @@ impl TestNetwork { let mut validators = HashMap::with_capacity(config.num_nodes as usize); for (id, account) in accounts.into_iter().enumerate() { - let mut rng = TestRng::fixed(id as u64); - let gen_ledger = genesis_ledger(gen_key, committee.clone(), balances.clone(), &mut rng); + let gen_ledger = + genesis_ledger(gen_key, committee.clone(), balances.clone(), bonded_balances.clone(), &mut rng); let ledger = Arc::new(TranslucentLedgerService::new(gen_ledger, Default::default())); - let storage = Storage::new(ledger.clone(), Arc::new(BFTMemoryService::new()), MAX_GC_ROUNDS); + let storage = Storage::new( + ledger.clone(), + Arc::new(BFTMemoryService::new()), + BatchHeader::::MAX_GC_ROUNDS as u64, + ); let (primary, bft) = if config.bft { let bft = BFT::::new(account, storage, ledger, None, &[], Some(id as u16)).unwrap(); @@ -325,12 +330,12 @@ impl TestNetwork { } // Initializes a new test committee. -fn new_test_committee(n: u16) -> (Vec>, Committee) { +pub fn new_test_committee(n: u16, rng: &mut TestRng) -> (Vec>, Committee) { let mut accounts = Vec::with_capacity(n as usize); let mut members = IndexMap::with_capacity(n as usize); for i in 0..n { // Sample the account. 
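Aside: the per-validator public balance in `TestNetwork::new` switches from a hard-coded supply to `CurrentNetwork::STARTING_SUPPLY`, and each committee member now self-bonds (acting as its own validator and withdrawal address) via `bonded_balances`. The balance arithmetic itself is unchanged; a worked example using the old hard-coded values purely for illustration:

```rust
fn main() {
    // Old hard-coded values, used here only to illustrate the formula.
    const STARTING_SUPPLY: u64 = 1_500_000_000_000_000;
    const MIN_VALIDATOR_STAKE: u64 = 1_000_000_000_000;
    let num_nodes: u64 = 4;

    // Bond the minimum stake per validator, then split the remainder evenly.
    let per_validator = (STARTING_SUPPLY - num_nodes * MIN_VALIDATOR_STAKE) / num_nodes;
    assert_eq!(per_validator, 374_000_000_000_000);
}
```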
- let account = Account::new(&mut TestRng::fixed(i as u64)).unwrap(); + let account = Account::new(rng).unwrap(); info!("Validator {}: {}", i, account.address()); members.insert(account.address(), (MIN_VALIDATOR_STAKE, false)); @@ -347,10 +352,11 @@ fn genesis_cache() -> &'static Mutex, Block>> { CACHE.get_or_init(|| Mutex::new(HashMap::new())) } -fn genesis_block( +pub fn genesis_block( genesis_private_key: PrivateKey, committee: Committee, public_balances: IndexMap, u64>, + bonded_balances: IndexMap, (Address, Address, u64)>, rng: &mut (impl Rng + CryptoRng), ) -> Block { // Initialize the store. @@ -358,13 +364,14 @@ fn genesis_block( // Initialize a new VM. let vm = VM::from(store).unwrap(); // Initialize the genesis block. - vm.genesis_quorum(&genesis_private_key, committee, public_balances, rng).unwrap() + vm.genesis_quorum(&genesis_private_key, committee, public_balances, bonded_balances, rng).unwrap() } -fn genesis_ledger( +pub fn genesis_ledger( genesis_private_key: PrivateKey, committee: Committee, public_balances: IndexMap, u64>, + bonded_balances: IndexMap, (Address, Address, u64)>, rng: &mut (impl Rng + CryptoRng), ) -> CurrentLedger { let cache_key = @@ -383,7 +390,7 @@ fn genesis_ledger( return Block::from_bytes_le(&buffer).unwrap(); } - let block = genesis_block(genesis_private_key, committee, public_balances, rng); + let block = genesis_block(genesis_private_key, committee, public_balances, bonded_balances, rng); std::fs::write(&file_path, block.to_bytes_le().unwrap()).unwrap(); block }) diff --git a/node/bft/tests/common/test_peer.rs b/node/bft/tests/common/test_peer.rs new file mode 100644 index 0000000000..508ab75d6f --- /dev/null +++ b/node/bft/tests/common/test_peer.rs @@ -0,0 +1,146 @@ +// Copyright (C) 2019-2023 Aleo Systems Inc. +// This file is part of the snarkOS library. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::common::CurrentNetwork; +use snarkos_node_bft_events::{Event, EventCodec}; + +use std::{ + io, + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Duration, +}; + +use pea2pea::{ + protocols::{Handshake, OnDisconnect, Reading, Writing}, + Config, + Connection, + ConnectionSide, + Node, + Pea2Pea, +}; + +use tokio::{ + sync::mpsc::{self, Receiver, Sender}, + time::timeout, +}; + +pub struct TestPeer { + inner_node: InnerNode, + inbound_rx: Receiver<(SocketAddr, Event)>, +} + +#[derive(Clone)] +struct InnerNode { + // The pea2pea node instance. + node: Node, + // The inbound channel sender, used to consolidate inbound messages into a single queue so they + // can be read in order in tests. 
+ inbound_tx: Sender<(SocketAddr, Event)>, +} + +impl TestPeer { + pub async fn new() -> Self { + let (tx, rx) = mpsc::channel(100); + let inner_node = InnerNode { + node: Node::new(Config { + max_connections: 200, + listener_addr: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)), + ..Default::default() + }), + inbound_tx: tx, + }; + + inner_node.enable_handshake().await; + inner_node.enable_reading().await; + inner_node.enable_writing().await; + inner_node.enable_disconnect().await; + inner_node.node().start_listening().await.unwrap(); + + Self { inner_node, inbound_rx: rx } + } + + pub fn listening_addr(&self) -> SocketAddr { + self.inner_node.node().listening_addr().expect("addr should be present") + } + + pub async fn connect(&self, target: SocketAddr) -> io::Result<()> { + self.inner_node.node().connect(target).await?; + Ok(()) + } + + // Note: the codec doesn't actually support sending bytes post-handshake, perhaps this should + // be relaxed by making a test-only codec in future. + pub fn unicast(&self, target: SocketAddr, message: Event) -> io::Result<()> { + self.inner_node.unicast(target, message)?; + Ok(()) + } + + pub async fn recv(&mut self) -> (SocketAddr, Event) { + match self.inbound_rx.recv().await { + Some(message) => message, + None => panic!("all senders dropped!"), + } + } + + pub async fn recv_timeout(&mut self, duration: Duration) -> (SocketAddr, Event) { + match timeout(duration, self.recv()).await { + Ok(message) => message, + _ => panic!("timed out waiting for message"), + } + } +} + +impl Pea2Pea for InnerNode { + fn node(&self) -> &Node { + &self.node + } +} + +impl Handshake for InnerNode { + // Set the timeout on the test peer to be longer than the gateway's timeout. + const TIMEOUT_MS: u64 = 10_000; + + async fn perform_handshake(&self, connection: Connection) -> io::Result { + // Don't perform the Aleo handshake so we can test the edge cases fully. + Ok(connection) + } +} + +impl Writing for InnerNode { + type Codec = EventCodec; + type Message = Event; + + fn codec(&self, _peer_addr: SocketAddr, _side: ConnectionSide) -> Self::Codec { + Default::default() + } +} + +impl Reading for InnerNode { + type Codec = EventCodec; + type Message = Event; + + fn codec(&self, _peer_addr: SocketAddr, _side: ConnectionSide) -> Self::Codec { + Default::default() + } + + async fn process_message(&self, peer_addr: SocketAddr, message: Self::Message) -> io::Result<()> { + self.inbound_tx.send((peer_addr, message)).await.map_err(|_| { + io::Error::new(io::ErrorKind::Other, "failed to send message to test peer, all receivers have been dropped") + }) + } +} + +impl OnDisconnect for InnerNode { + async fn on_disconnect(&self, _peer_addr: SocketAddr) {} +} diff --git a/node/bft/tests/common/utils.rs b/node/bft/tests/common/utils.rs index b2d5d5520f..bd558f418a 100644 --- a/node/bft/tests/common/utils.rs +++ b/node/bft/tests/common/utils.rs @@ -12,13 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
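Aside: a usage sketch for the new `TestPeer`. The `gateway_addr` value is hypothetical; in the e2e tests it would be the listening address of the node under test:

```rust
// Sketch: drive a gateway from a bare pea2pea peer that skips the Aleo handshake.
let mut peer = TestPeer::new().await;
peer.connect(gateway_addr).await.unwrap(); // gateway_addr: SocketAddr of the node under test (assumed)
let (from, event) = peer.recv_timeout(std::time::Duration::from_secs(5)).await;
// `event` can now be matched against the expected Event variant from `from`.
```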
-use crate::common::CurrentNetwork; -use snarkos_node_bft::helpers::PrimarySender; +use crate::common::{primary, CurrentNetwork, TranslucentLedgerService}; +use snarkos_account::Account; +use snarkos_node_bft::{ + helpers::{PrimarySender, Storage}, + Gateway, + Worker, +}; + +use snarkos_node_bft_storage_service::BFTMemoryService; use snarkvm::{ - ledger::narwhal::Data, + console::account::Address, + ledger::{ + committee::Committee, + narwhal::{BatchHeader, Data}, + store::helpers::memory::ConsensusMemory, + }, prelude::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, + committee::MIN_VALIDATOR_STAKE, + puzzle::{Solution, SolutionID}, Field, Network, TestRng, @@ -26,11 +39,13 @@ use snarkvm::{ }, }; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use ::bytes::Bytes; +use indexmap::IndexMap; +use parking_lot::RwLock; use rand::Rng; -use tokio::{sync::oneshot, task, task::JoinHandle, time::sleep}; +use tokio::{sync::oneshot, task::JoinHandle, time::sleep}; use tracing::*; use tracing_subscriber::{ layer::{Layer, SubscriberExt}, @@ -83,32 +98,29 @@ pub fn fire_unconfirmed_solutions( // This RNG samples *different* fake solutions for each node. let mut unique_rng = TestRng::fixed(node_id as u64); - // A closure to generate a commitment and solution. - async fn sample(mut rng: impl Rng) -> (PuzzleCommitment, Data>) { - // Sample a random fake puzzle commitment. - // TODO (howardwu): Use a mutex to bring in the real 'proof target' and change this sampling to a while loop. - let affine = rng.gen(); - let commitment = - task::spawn_blocking(move || PuzzleCommitment::::from_g1_affine(affine)).await.unwrap(); + // A closure to generate a solution ID and solution. + async fn sample(mut rng: impl Rng) -> (SolutionID, Data>) { + // Sample a random fake solution ID. + let solution_id = rng.gen::().into(); // Sample random fake solution bytes. let mut vec = vec![0u8; 1024]; rng.fill_bytes(&mut vec); let solution = Data::Buffer(Bytes::from(vec)); - // Return the ID and solution. - (commitment, solution) + // Return the solution ID and solution. + (solution_id, solution) } // Initialize a counter. let mut counter = 0; loop { - // Sample a random fake puzzle commitment and solution. - let (commitment, solution) = + // Sample a random fake solution ID and solution. + let (solution_id, solution) = if counter % 2 == 0 { sample(&mut shared_rng).await } else { sample(&mut unique_rng).await }; // Initialize a callback sender and receiver. let (callback, callback_receiver) = oneshot::channel(); // Send the fake solution. - if let Err(e) = tx_unconfirmed_solution.send((commitment, solution, callback)).await { + if let Err(e) = tx_unconfirmed_solution.send((solution_id, solution, callback)).await { error!("Failed to send unconfirmed solution: {e}"); } let _ = callback_receiver.await; @@ -167,3 +179,56 @@ pub fn fire_unconfirmed_transactions( } }) } + +/// Samples a new ledger with the given number of nodes. 
+pub fn sample_ledger( + accounts: &[Account], + committee: &Committee, + rng: &mut TestRng, +) -> Arc>> { + let num_nodes = committee.num_members(); + let bonded_balances: IndexMap<_, _> = + committee.members().iter().map(|(address, (amount, _))| (*address, (*address, *address, *amount))).collect(); + let gen_key = *accounts[0].private_key(); + let public_balance_per_validator = + (CurrentNetwork::STARTING_SUPPLY - (num_nodes as u64) * MIN_VALIDATOR_STAKE) / (num_nodes as u64); + let mut balances = IndexMap::, u64>::new(); + for account in accounts.iter() { + balances.insert(account.address(), public_balance_per_validator); + } + + let gen_ledger = + primary::genesis_ledger(gen_key, committee.clone(), balances.clone(), bonded_balances.clone(), rng); + Arc::new(TranslucentLedgerService::new(gen_ledger, Default::default())) +} + +/// Samples a new storage with the given ledger. +pub fn sample_storage(ledger: Arc>>) -> Storage { + Storage::new(ledger, Arc::new(BFTMemoryService::new()), BatchHeader::::MAX_GC_ROUNDS as u64) +} + +/// Samples a new gateway with the given ledger. +pub fn sample_gateway( + account: Account, + storage: Storage, + ledger: Arc>>, +) -> Gateway { + // Initialize the gateway. + Gateway::new(account, storage, ledger, None, &[], None).unwrap() +} + +/// Samples a new worker with the given ledger. +pub fn sample_worker( + id: u8, + account: Account, + ledger: Arc>>, +) -> Worker { + // Sample a storage. + let storage = sample_storage(ledger.clone()); + // Sample a gateway. + let gateway = sample_gateway(account, storage.clone(), ledger.clone()); + // Sample a dummy proposed batch. + let proposed_batch = Arc::new(RwLock::new(None)); + // Construct the worker instance. + Worker::new(id, Arc::new(gateway.clone()), storage.clone(), ledger, proposed_batch).unwrap() +} diff --git a/node/bft/tests/components/mod.rs b/node/bft/tests/components/mod.rs new file mode 100644 index 0000000000..d58afbf610 --- /dev/null +++ b/node/bft/tests/components/mod.rs @@ -0,0 +1,18 @@ +// Copyright (C) 2019-2023 Aleo Systems Inc. +// This file is part of the snarkOS library. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod pending; +pub mod worker; + +const ITERATIONS: u32 = 100; diff --git a/node/bft/tests/components/pending.rs b/node/bft/tests/components/pending.rs new file mode 100644 index 0000000000..2825637369 --- /dev/null +++ b/node/bft/tests/components/pending.rs @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2023 Aleo Systems Inc. +// This file is part of the snarkOS library. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::common::{primary::new_test_committee, utils::sample_ledger, CurrentNetwork};
+use snarkos_node_bft::helpers::max_redundant_requests;
+use snarkvm::{ledger::committee::Committee, prelude::TestRng};
+
+#[test]
+fn test_max_redundant_requests() {
+    const NUM_NODES: u16 = Committee::<CurrentNetwork>::MAX_COMMITTEE_SIZE;
+
+    // Initialize the RNG.
+    let mut rng = TestRng::default();
+    // Initialize the accounts and the committee.
+    let (accounts, committee) = new_test_committee(NUM_NODES, &mut rng);
+    // Sample a ledger.
+    let ledger = sample_ledger(&accounts, &committee, &mut rng);
+    // Ensure the maximum number of redundant requests is correct and consistent across iterations.
+    assert_eq!(max_redundant_requests(ledger, 0), 34, "Update me if the formula changes");
+}
diff --git a/node/bft/tests/components/worker.rs b/node/bft/tests/components/worker.rs
new file mode 100644
index 0000000000..a439a8bdfe
--- /dev/null
+++ b/node/bft/tests/components/worker.rs
@@ -0,0 +1,150 @@
+// Copyright (C) 2019-2023 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::common::{
+    primary::new_test_committee,
+    utils::{sample_ledger, sample_worker},
+    CurrentNetwork,
+};
+use snarkos_node_bft::helpers::max_redundant_requests;
+use snarkvm::{
+    ledger::{committee::Committee, narwhal::TransmissionID},
+    prelude::{Network, TestRng},
+};
+
+use std::net::SocketAddr;
+
+#[tokio::test]
+#[rustfmt::skip]
+async fn test_resend_transmission_request() {
+    const NUM_NODES: u16 = Committee::<CurrentNetwork>::MAX_COMMITTEE_SIZE;
+
+    // Initialize the RNG.
+    let mut rng = TestRng::default();
+    // Initialize the accounts and the committee.
+    let (accounts, committee) = new_test_committee(NUM_NODES, &mut rng);
+    // Sample a ledger.
+    let ledger = sample_ledger(&accounts, &committee, &mut rng);
+    // Sample a worker.
+    let worker = sample_worker(0, accounts[0].clone(), ledger.clone());
+
+    // Determine the maximum number of redundant requests.
+    let max_redundancy = max_redundant_requests(ledger.clone(), 0);
+    assert_eq!(max_redundancy, 34, "Update me if the formula changes");
+
+    // Prepare a dummy transmission ID.
+    let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234));
+    let transmission_id = TransmissionID::Transaction(<CurrentNetwork as Network>::TransactionID::default());
+
+    // Ensure the worker does not have the dummy transmission ID.
+    assert!(!worker.contains_transmission(transmission_id), "Transmission should not exist");
+
+    // Send a request to fetch the dummy transmission.
+    let worker_ = worker.clone();
+    tokio::spawn(async move { worker_.get_or_fetch_transmission(peer_ip, transmission_id).await });
+
+    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+
+    let pending = worker.pending();
+    // Ensure the transmission ID exists in the pending queue.
+    assert!(pending.contains(transmission_id), "Missing a transmission in the pending queue");
+    // Ensure the peer IP is in the pending queue for the transmission ID.
+    assert!(pending.contains_peer(transmission_id, peer_ip), "Missing a peer IP for transmission in the pending queue");
+    assert_eq!(pending.get_peers(transmission_id), Some([peer_ip].into_iter().collect()), "Missing a peer IP for transmission in the pending queue");
+    // Ensure the number of callbacks is correct.
+    assert_eq!(pending.num_callbacks(transmission_id), 1, "Incorrect number of callbacks for transmission");
+    // Ensure the number of sent requests is correct.
+    assert_eq!(pending.num_sent_requests(transmission_id), 1, "Incorrect number of sent requests for transmission");
+
+    // Rebroadcast the same request to fetch the dummy transmission.
+    for i in 1..=10 {
+        let worker_ = worker.clone();
+        tokio::spawn(async move { worker_.get_or_fetch_transmission(peer_ip, transmission_id).await });
+
+        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+
+        // Ensure the transmission ID exists in the pending queue.
+        assert!(pending.contains(transmission_id), "Missing a transmission in the pending queue");
+        // Ensure the peer IP is in the pending queue for the transmission ID.
+        assert!(pending.contains_peer(transmission_id, peer_ip), "Missing a peer IP for transmission in the pending queue");
+        assert_eq!(pending.get_peers(transmission_id), Some([peer_ip].into_iter().collect()), "Missing a peer IP for transmission in the pending queue");
+        // Ensure the number of callbacks is correct.
+        assert_eq!(pending.num_callbacks(transmission_id), 1 + i, "Incorrect number of callbacks for transmission");
+        // Ensure the number of sent requests is correct.
+        assert_eq!(pending.num_sent_requests(transmission_id), (1 + i).min(max_redundancy), "Incorrect number of sent requests for transmission");
+    }
+}
+
+#[tokio::test]
+#[rustfmt::skip]
+async fn test_flood_transmission_requests() {
+    const NUM_NODES: u16 = Committee::<CurrentNetwork>::MAX_COMMITTEE_SIZE;
+
+    // Initialize the RNG.
+    let mut rng = TestRng::default();
+    // Initialize the accounts and the committee.
+    let (accounts, committee) = new_test_committee(NUM_NODES, &mut rng);
+    // Sample a ledger.
+    let ledger = sample_ledger(&accounts, &committee, &mut rng);
+    // Sample a worker.
+    let worker = sample_worker(0, accounts[0].clone(), ledger.clone());
+
+    // Determine the maximum number of redundant requests.
+    let max_redundancy = max_redundant_requests(ledger.clone(), 0);
+    assert_eq!(max_redundancy, 34, "Update me if the formula changes");
+
+    // Prepare a dummy transmission ID.
+    let peer_ip = SocketAddr::from(([127, 0, 0, 1], 1234));
+    let transmission_id = TransmissionID::Transaction(<CurrentNetwork as Network>::TransactionID::default());
+
+    // Ensure the worker does not have the dummy transmission ID.
+    assert!(!worker.contains_transmission(transmission_id), "Transmission should not exist");
+
+    // Send the maximum number of redundant requests to fetch the dummy transmission.
+    for _ in 0..max_redundancy {
+        let worker_ = worker.clone();
+        tokio::spawn(async move { worker_.get_or_fetch_transmission(peer_ip, transmission_id).await });
+    }
+
+    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+
+    let pending = worker.pending();
+    // Ensure the transmission ID exists in the pending queue.
+    assert!(pending.contains(transmission_id), "Missing a transmission in the pending queue");
+    // Ensure the peer IP is in the pending queue for the transmission ID.
+ assert!(pending.contains_peer(transmission_id, peer_ip), "Missing a peer IP for transmission in the pending queue"); + assert_eq!(pending.get_peers(transmission_id), Some([peer_ip].into_iter().collect()), "Missing a peer IP for transmission in the pending queue"); + // Ensure the number of callbacks is correct. + assert_eq!(pending.num_callbacks(transmission_id), max_redundancy, "Incorrect number of callbacks for transmission"); + // Ensure the number of sent requests is correct. + assert_eq!(pending.num_sent_requests(transmission_id), max_redundancy, "Incorrect number of sent requests for transmission"); + + // Ensure any further redundant requests are not sent. + for i in 1..=20 { + let worker_ = worker.clone(); + tokio::spawn(async move { worker_.get_or_fetch_transmission(peer_ip, transmission_id).await }); + + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + + // Ensure the transmission ID exists in the pending queue. + assert!(pending.contains(transmission_id), "Missing a transmission in the pending queue"); + // Ensure the peer IP is in the pending queue for the transmission ID. + assert!(pending.contains_peer(transmission_id, peer_ip), "Missing a peer IP for transmission in the pending queue"); + assert_eq!(pending.get_peers(transmission_id), Some([peer_ip].into_iter().collect()), "Missing a peer IP for transmission in the pending queue"); + // Ensure the number of callbacks is correct. + assert_eq!(pending.num_callbacks(transmission_id), max_redundancy + i, "Incorrect number of callbacks for transmission"); + // Ensure the number of sent requests is correct. + assert_eq!(pending.num_sent_requests(transmission_id), max_redundancy, "Incorrect number of sent requests for transmission"); + } +} diff --git a/node/bft/tests/gateway_e2e.rs b/node/bft/tests/gateway_e2e.rs new file mode 100644 index 0000000000..46a270bf56 --- /dev/null +++ b/node/bft/tests/gateway_e2e.rs @@ -0,0 +1,270 @@ +// Copyright (C) 2019-2023 Aleo Systems Inc. +// This file is part of the snarkOS library. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at: +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
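Before the gateway handshake tests begin, it is worth spelling out the invariant that both worker tests above pivot on: every call to `get_or_fetch_transmission` registers a callback, but outbound requests stop once `max_redundant_requests` is reached. A toy model of that bookkeeping follows; the `PendingEntry` type and its `fetch` method are hypothetical stand-ins, not the real pending-queue API:

// Toy model of the pending-queue bookkeeping the two worker tests assert on.
struct PendingEntry {
    num_callbacks: usize,
    num_sent_requests: usize,
}

impl PendingEntry {
    // Hypothetical analogue of one `get_or_fetch_transmission` call.
    fn fetch(&mut self, max_redundancy: usize) {
        // Every caller waits on a callback...
        self.num_callbacks += 1;
        // ...but a new request only goes out while under the redundancy cap.
        if self.num_sent_requests < max_redundancy {
            self.num_sent_requests += 1;
        }
    }
}

fn main() {
    let max_redundancy = 34; // The value both tests currently expect.
    let mut entry = PendingEntry { num_callbacks: 0, num_sent_requests: 0 };
    for _ in 0..max_redundancy + 20 {
        entry.fetch(max_redundancy);
    }
    assert_eq!(entry.num_callbacks, 54); // Grows with every attempt.
    assert_eq!(entry.num_sent_requests, 34); // Capped, as in the flood test.
}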
+ +#[allow(dead_code)] +mod common; + +use crate::common::{ + primary::new_test_committee, + test_peer::TestPeer, + utils::{sample_gateway, sample_ledger, sample_storage}, + CurrentNetwork, +}; +use snarkos_account::Account; +use snarkos_node_bft::{helpers::init_primary_channels, Gateway}; +use snarkos_node_bft_events::{ChallengeRequest, ChallengeResponse, Disconnect, DisconnectReason, Event, WorkerPing}; +use snarkos_node_tcp::P2P; +use snarkvm::{ledger::narwhal::Data, prelude::TestRng}; + +use std::time::Duration; + +use deadline::deadline; +use rand::Rng; + +async fn new_test_gateway( + num_nodes: u16, + rng: &mut TestRng, +) -> (Vec>, Gateway) { + let (accounts, committee) = new_test_committee(num_nodes, rng); + let ledger = sample_ledger(&accounts, &committee, rng); + let storage = sample_storage(ledger.clone()); + let gateway = sample_gateway(accounts[0].clone(), storage, ledger); + + // Set up primary channels, we discard the rx as we're testing the gateway sans BFT. + let (primary_tx, _primary_rx) = init_primary_channels(); + + gateway.run(primary_tx, [].into(), None).await; + + (accounts, gateway) +} + +// The test peer connects to the gateway and completes the no-op handshake (so +// the connection is registered). The gateway's handshake should timeout. +#[tokio::test(flavor = "multi_thread")] +async fn handshake_responder_side_timeout() { + const NUM_NODES: u16 = 4; + + let mut rng = TestRng::default(); + let (_accounts, gateway) = new_test_gateway(NUM_NODES, &mut rng).await; + let test_peer = TestPeer::new().await; + + dbg!(test_peer.listening_addr()); + + // Initiate a connection with the gateway, this will only return once the handshake protocol has + // completed on the test peer's side, which is a no-op. + assert!(test_peer.connect(gateway.local_ip()).await.is_ok()); + + /* Don't send any further messages and wait for the gateway to timeout. */ + + // Check the connection has been registered. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 1); + + // Check the tcp stack's connection counts, wait longer than the gateway's timeout to ensure + // connecting peers are cleared. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(5), move || gateway_clone.tcp().num_connecting() == 0); + + // Check the test peer hasn't been added to the gateway's connected peers. + assert!(gateway.connected_peers().read().is_empty()); + assert_eq!(gateway.tcp().num_connected(), 0); +} + +// The test peer connects to the gateway and sends an unexpected event. +// The gateway's handshake should be interrupted and the peer should be +// disconnected. +macro_rules! handshake_responder_side_unexpected_event { + ($test_name:ident, $payload:expr) => { + paste::paste! { + #[tokio::test(flavor = "multi_thread")] + async fn []() { + const NUM_NODES: u16 = 4; + + let mut rng = TestRng::default(); + let (_accounts, gateway) = new_test_gateway(NUM_NODES, &mut rng).await; + let test_peer = TestPeer::new().await; + + // Initiate a connection with the gateway, this will only return once the handshake protocol has + // completed on the test peer's side, which is a no-op. + assert!(test_peer.connect(gateway.local_ip()).await.is_ok()); + + // Check the connection has been registered. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 1); + + // Send an unexpected event. 
+ let _ = test_peer.unicast( + gateway.local_ip(), + $payload + ); + + // Check the tcp stack's connection counts, make sure the disconnect interrupted handshaking, + // wait a short time to ensure the gateway has time to process the disconnect (note: this is + // shorter than the gateway's timeout, so we can ensure that's not the reason for the + // disconnect). + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 0); + + // Check the test peer hasn't been added to the gateway's connected peers. + assert!(gateway.connected_peers().read().is_empty()); + assert_eq!(gateway.tcp().num_connected(), 0); + } + } + }; +} + +/* Unexpected disconnects. */ + +macro_rules! handshake_responder_side_unexpected_disconnect { + ($($reason:ident),*) => { + $( + paste::paste! { + handshake_responder_side_unexpected_event!( + [], + Event::Disconnect(Disconnect::from(DisconnectReason::$reason)) + ); + } + )* + } + } + +handshake_responder_side_unexpected_disconnect!( + ProtocolViolation, + NoReasonGiven, + InvalidChallengeResponse, + OutdatedClientVersion +); + +/* Other unexpected event types */ + +handshake_responder_side_unexpected_event!(worker_ping, Event::WorkerPing(WorkerPing::new([].into()))); + +// TODO(nkls): other event types, can be done as a follow up. + +/* Invalid challenge request */ + +#[tokio::test(flavor = "multi_thread")] +async fn handshake_responder_side_invalid_challenge_request() { + const NUM_NODES: u16 = 4; + + let mut rng = TestRng::default(); + let (accounts, gateway) = new_test_gateway(NUM_NODES, &mut rng).await; + let test_peer = TestPeer::new().await; + + // Initiate a connection with the gateway, this will only return once the handshake protocol has + // completed on the test peer's side, which is a no-op. + assert!(test_peer.connect(gateway.local_ip()).await.is_ok()); + + // Check the connection has been registered. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 1); + + // Use the address from the second peer in the list, the test peer will use the first. + let listener_port = test_peer.listening_addr().port(); + let address = accounts.get(1).unwrap().address(); + let nonce = rng.gen(); + // Set the wrong version so the challenge request is invalid. + let challenge_request = ChallengeRequest { version: 0, listener_port, address, nonce }; + + // Send the message + let _ = test_peer.unicast(gateway.local_ip(), Event::ChallengeRequest(challenge_request)); + + // FIXME(nkls): currently we can't assert on the disconnect type, the message isn't always sent + // before the disconnect. + + // Check the test peer has been removed from the gateway's connecting peers. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 0); + // Check the test peer hasn't been added to the gateway's connected peers. 
+ assert!(gateway.connected_peers().read().is_empty()); + assert_eq!(gateway.tcp().num_connected(), 0); +} + +/* Invalid challenge response */ + +#[tokio::test(flavor = "multi_thread")] +async fn handshake_responder_side_invalid_challenge_response() { + const NUM_NODES: u16 = 4; + + let mut rng = TestRng::default(); + let (accounts, gateway) = new_test_gateway(NUM_NODES, &mut rng).await; + let mut test_peer = TestPeer::new().await; + + // Initiate a connection with the gateway, this will only return once the handshake protocol has + // completed on the test peer's side, which is a no-op for the moment. + assert!(test_peer.connect(gateway.local_ip()).await.is_ok()); + + // Check the connection has been registered. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 1); + + // Use the address from the second peer in the list, the test peer will use the first. + let listener_port = test_peer.listening_addr().port(); + let address = accounts.get(1).unwrap().address(); + let our_nonce = rng.gen(); + let challenge_request = ChallengeRequest { version: 6, listener_port, address, nonce: our_nonce }; + + // Send the challenge request. + let _ = test_peer.unicast(gateway.local_ip(), Event::ChallengeRequest(challenge_request)); + + // Receive the gateway's challenge response. + let (peer_addr, Event::ChallengeResponse(ChallengeResponse { signature, nonce })) = + test_peer.recv_timeout(Duration::from_secs(1)).await + else { + panic!("Expected challenge response") + }; + + // Check the sender is the gateway. + assert_eq!(peer_addr, gateway.local_ip()); + // Check the nonce we sent is in the signature. + assert!( + signature.deserialize_blocking().unwrap().verify_bytes( + &accounts.first().unwrap().address(), + &[our_nonce.to_le_bytes(), nonce.to_le_bytes()].concat() + ) + ); + + // Receive the gateway's challenge request. + let (peer_addr, Event::ChallengeRequest(challenge_request)) = test_peer.recv_timeout(Duration::from_secs(1)).await + else { + panic!("Expected challenge request") + }; + // Check the version, listener port and address are correct. + assert_eq!(peer_addr, gateway.local_ip()); + assert_eq!(challenge_request.version, 6); + assert_eq!(challenge_request.listener_port, gateway.local_ip().port()); + assert_eq!(challenge_request.address, accounts.first().unwrap().address()); + + // Send the challenge response with an invalid signature. + let response_nonce = rng.gen(); + let _ = test_peer.unicast( + gateway.local_ip(), + Event::ChallengeResponse(ChallengeResponse { + signature: Data::Object( + accounts.get(2).unwrap().sign_bytes(&challenge_request.nonce.to_le_bytes(), &mut rng).unwrap(), + ), + nonce: response_nonce, + }), + ); + + // FIXME(nkls): currently we can't assert on the disconnect type, the message isn't always sent + // before the disconnect. + + // Check the test peer has been removed from the gateway's connecting peers. + let gateway_clone = gateway.clone(); + deadline!(Duration::from_secs(1), move || gateway_clone.tcp().num_connecting() == 0); + // Check the test peer hasn't been added to the gateway's connected peers. 
+ assert!(gateway.connected_peers().read().is_empty()); + assert_eq!(gateway.tcp().num_connected(), 0); +} diff --git a/node/bft/tests/narwhal_e2e.rs b/node/bft/tests/narwhal_e2e.rs index b2783b1d90..ebf4c47eb6 100644 --- a/node/bft/tests/narwhal_e2e.rs +++ b/node/bft/tests/narwhal_e2e.rs @@ -16,7 +16,7 @@ mod common; use crate::common::primary::{TestNetwork, TestNetworkConfig}; -use snarkos_node_bft::MAX_BATCH_DELAY_IN_MS; +use snarkos_node_bft::MAX_FETCH_TIMEOUT_IN_MS; use std::time::Duration; @@ -72,7 +72,7 @@ async fn test_quorum_threshold() { // Start the cannons for node 0. network.fire_transmissions_at(0, TRANSMISSION_INTERVAL_MS); - sleep(Duration::from_millis(MAX_BATCH_DELAY_IN_MS * 2)).await; + sleep(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS)).await; // Check each node is still at round 1. for validator in network.validators.values() { @@ -83,7 +83,7 @@ async fn test_quorum_threshold() { network.connect_validators(0, 1).await; network.fire_transmissions_at(1, TRANSMISSION_INTERVAL_MS); - sleep(Duration::from_millis(MAX_BATCH_DELAY_IN_MS * 2)).await; + sleep(Duration::from_millis(MAX_FETCH_TIMEOUT_IN_MS)).await; // Check each node is still at round 1. for validator in network.validators.values() { diff --git a/node/cdn/Cargo.toml b/node/cdn/Cargo.toml index aff5c26cba..2078de4a59 100644 --- a/node/cdn/Cargo.toml +++ b/node/cdn/Cargo.toml @@ -60,5 +60,9 @@ features = [ "rt" ] [dependencies.tracing] version = "0.1" +[dev-dependencies.tokio] +version = "1.28" +features = [ "rt", "rt-multi-thread" ] + [dev-dependencies.tokio-test] version = "0.4" diff --git a/node/cdn/src/blocks.rs b/node/cdn/src/blocks.rs index 8373101579..a1b4b91e3e 100644 --- a/node/cdn/src/blocks.rs +++ b/node/cdn/src/blocks.rs @@ -48,7 +48,7 @@ const MAXIMUM_PENDING_BLOCKS: u32 = BLOCKS_PER_FILE * CONCURRENT_REQUESTS * 2; /// Maximum number of attempts for a request to the CDN. const MAXIMUM_REQUEST_ATTEMPTS: u8 = 10; /// The supported network. -const NETWORK_ID: u16 = 3; +const NETWORK_ID: u16 = 0; /// Loads blocks from a CDN into the ledger. 
/// @@ -438,12 +438,12 @@ mod tests { blocks::{cdn_get, cdn_height, log_progress, BLOCKS_PER_FILE}, load_blocks, }; - use snarkvm::prelude::{block::Block, Testnet3}; + use snarkvm::prelude::{block::Block, MainnetV0}; use parking_lot::RwLock; use std::{sync::Arc, time::Instant}; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; const TEST_BASE_URL: &str = "https://s3.us-west-1.amazonaws.com/testnet3.blocks/phase3"; @@ -513,7 +513,7 @@ mod tests { rt.block_on(async { let client = reqwest::Client::new(); let height = - cdn_get::(client, &format!("{TEST_BASE_URL}/testnet3/latest/height"), "height").await.unwrap(); + cdn_get::(client, &format!("{TEST_BASE_URL}/mainnet/latest/height"), "height").await.unwrap(); assert!(height > 0); }); } diff --git a/node/consensus/src/lib.rs b/node/consensus/src/lib.rs index afe9911573..530893893d 100644 --- a/node/consensus/src/lib.rs +++ b/node/consensus/src/lib.rs @@ -28,17 +28,16 @@ use snarkos_node_bft::{ Storage as NarwhalStorage, }, spawn_blocking, + Primary, BFT, - MAX_GC_ROUNDS, - MAX_TRANSMISSIONS_PER_BATCH, }; use snarkos_node_bft_ledger_service::LedgerService; use snarkos_node_bft_storage_service::BFTPersistentStorage; use snarkvm::{ ledger::{ block::Transaction, - coinbase::{ProverSolution, PuzzleCommitment}, - narwhal::{Data, Subdag, Transmission, TransmissionID}, + narwhal::{BatchHeader, Data, Subdag, Transmission, TransmissionID}, + puzzle::{Solution, SolutionID}, }, prelude::*, }; @@ -55,6 +54,37 @@ use tokio::{ task::JoinHandle, }; +#[cfg(feature = "metrics")] +use std::collections::HashMap; + +/// The capacity of the queue reserved for deployments. +/// Note: This is an inbound queue capacity, not a Narwhal-enforced capacity. +const CAPACITY_FOR_DEPLOYMENTS: usize = 1 << 10; +/// The capacity of the queue reserved for executions. +/// Note: This is an inbound queue capacity, not a Narwhal-enforced capacity. +const CAPACITY_FOR_EXECUTIONS: usize = 1 << 10; +/// The capacity of the queue reserved for solutions. +/// Note: This is an inbound queue capacity, not a Narwhal-enforced capacity. +const CAPACITY_FOR_SOLUTIONS: usize = 1 << 10; +/// The **suggested** maximum number of deployments in each interval. +/// Note: This is an inbound queue limit, not a Narwhal-enforced limit. +const MAX_DEPLOYMENTS_PER_INTERVAL: usize = 1; + +/// Helper struct to track incoming transactions. +struct TransactionsQueue { + pub deployments: LruCache>, + pub executions: LruCache>, +} + +impl Default for TransactionsQueue { + fn default() -> Self { + Self { + deployments: LruCache::new(NonZeroUsize::new(CAPACITY_FOR_DEPLOYMENTS).unwrap()), + executions: LruCache::new(NonZeroUsize::new(CAPACITY_FOR_EXECUTIONS).unwrap()), + } + } +} + #[derive(Clone)] pub struct Consensus { /// The ledger. @@ -64,13 +94,15 @@ pub struct Consensus { /// The primary sender. primary_sender: Arc>>, /// The unconfirmed solutions queue. - solutions_queue: Arc, ProverSolution>>>, + solutions_queue: Arc, Solution>>>, /// The unconfirmed transactions queue. - transactions_queue: Arc>>>, + transactions_queue: Arc>>, /// The recently-seen unconfirmed solutions. - seen_solutions: Arc, ()>>>, + seen_solutions: Arc, ()>>>, /// The recently-seen unconfirmed transactions. seen_transactions: Arc>>, + #[cfg(feature = "metrics")] + transmissions_queue_timestamps: Arc, i64>>>, /// The spawned handles. handles: Arc>>>, } @@ -92,7 +124,7 @@ impl Consensus { // Initialize the Narwhal transmissions. 
let transmissions = Arc::new(BFTPersistentStorage::open(storage_mode)?); // Initialize the Narwhal storage. - let storage = NarwhalStorage::new(ledger.clone(), transmissions, MAX_GC_ROUNDS); + let storage = NarwhalStorage::new(ledger.clone(), transmissions, BatchHeader::::MAX_GC_ROUNDS as u64); // Initialize the BFT. let bft = BFT::new(account, storage, ledger.clone(), ip, trusted_validators, dev)?; // Return the consensus. @@ -100,14 +132,12 @@ impl Consensus { ledger, bft, primary_sender: Default::default(), - solutions_queue: Arc::new(Mutex::new(LruCache::new( - NonZeroUsize::new(MAX_TRANSMISSIONS_PER_BATCH).unwrap(), - ))), - transactions_queue: Arc::new(Mutex::new(LruCache::new( - NonZeroUsize::new(MAX_TRANSMISSIONS_PER_BATCH).unwrap(), - ))), + solutions_queue: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(CAPACITY_FOR_SOLUTIONS).unwrap()))), + transactions_queue: Default::default(), seen_solutions: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(1 << 16).unwrap()))), seen_transactions: Arc::new(Mutex::new(LruCache::new(NonZeroUsize::new(1 << 16).unwrap()))), + #[cfg(feature = "metrics")] + transmissions_queue_timestamps: Default::default(), handles: Default::default(), }) } @@ -168,31 +198,95 @@ impl Consensus { impl Consensus { /// Returns the unconfirmed transmission IDs. pub fn unconfirmed_transmission_ids(&self) -> impl '_ + Iterator> { - self.bft.unconfirmed_transmission_ids() + self.worker_transmission_ids().chain(self.inbound_transmission_ids()) } /// Returns the unconfirmed transmissions. pub fn unconfirmed_transmissions(&self) -> impl '_ + Iterator, Transmission)> { - self.bft.unconfirmed_transmissions() + self.worker_transmissions().chain(self.inbound_transmissions()) } /// Returns the unconfirmed solutions. - pub fn unconfirmed_solutions(&self) -> impl '_ + Iterator, Data>)> { - self.bft.unconfirmed_solutions() + pub fn unconfirmed_solutions(&self) -> impl '_ + Iterator, Data>)> { + self.worker_solutions().chain(self.inbound_solutions()) } /// Returns the unconfirmed transactions. pub fn unconfirmed_transactions(&self) -> impl '_ + Iterator>)> { - self.bft.unconfirmed_transactions() + self.worker_transactions().chain(self.inbound_transactions()) + } +} + +impl Consensus { + /// Returns the worker transmission IDs. + pub fn worker_transmission_ids(&self) -> impl '_ + Iterator> { + self.bft.worker_transmission_ids() + } + + /// Returns the worker transmissions. + pub fn worker_transmissions(&self) -> impl '_ + Iterator, Transmission)> { + self.bft.worker_transmissions() + } + + /// Returns the worker solutions. + pub fn worker_solutions(&self) -> impl '_ + Iterator, Data>)> { + self.bft.worker_solutions() + } + + /// Returns the worker transactions. + pub fn worker_transactions(&self) -> impl '_ + Iterator>)> { + self.bft.worker_transactions() + } +} + +impl Consensus { + /// Returns the transmission IDs in the inbound queue. + pub fn inbound_transmission_ids(&self) -> impl '_ + Iterator> { + self.inbound_transmissions().map(|(id, _)| id) + } + + /// Returns the transmissions in the inbound queue. + pub fn inbound_transmissions(&self) -> impl '_ + Iterator, Transmission)> { + self.inbound_transactions() + .map(|(id, tx)| (TransmissionID::Transaction(id), Transmission::Transaction(tx))) + .chain( + self.inbound_solutions() + .map(|(id, solution)| (TransmissionID::Solution(id), Transmission::Solution(solution))), + ) + } + + /// Returns the solutions in the inbound queue. 
+ pub fn inbound_solutions(&self) -> impl '_ + Iterator, Data>)> { + // Return an iterator over the solutions in the inbound queue. + self.solutions_queue.lock().clone().into_iter().map(|(id, solution)| (id, Data::Object(solution))) + } + + /// Returns the transactions in the inbound queue. + pub fn inbound_transactions(&self) -> impl '_ + Iterator>)> { + // Acquire the lock on the transactions queue. + let tx_queue = self.transactions_queue.lock(); + // Return an iterator over the deployment and execution transactions in the inbound queue. + tx_queue + .deployments + .clone() + .into_iter() + .chain(tx_queue.executions.clone()) + .map(|(id, tx)| (id, Data::Object(tx))) } } impl Consensus { /// Adds the given unconfirmed solution to the memory pool. - pub async fn add_unconfirmed_solution(&self, solution: ProverSolution) -> Result<()> { + pub async fn add_unconfirmed_solution(&self, solution: Solution) -> Result<()> { + #[cfg(feature = "metrics")] + { + metrics::increment_gauge(metrics::consensus::UNCONFIRMED_SOLUTIONS, 1f64); + let timestamp = snarkos_node_bft::helpers::now(); + self.transmissions_queue_timestamps.lock().insert(TransmissionID::Solution(solution.id()), timestamp); + } // Process the unconfirmed solution. { - let solution_id = solution.commitment(); + let solution_id = solution.id(); // Check if the transaction was recently seen. if self.seen_solutions.lock().put(solution_id, ()).is_some() { @@ -211,14 +305,17 @@ impl Consensus { } // If the memory pool of this node is full, return early. - let num_unconfirmed = self.num_unconfirmed_transmissions(); - if num_unconfirmed > N::MAX_SOLUTIONS || num_unconfirmed > MAX_TRANSMISSIONS_PER_BATCH { + let num_unconfirmed_solutions = self.num_unconfirmed_solutions(); + let num_unconfirmed_transmissions = self.num_unconfirmed_transmissions(); + if num_unconfirmed_solutions >= N::MAX_SOLUTIONS + || num_unconfirmed_transmissions >= Primary::::MAX_TRANSMISSIONS_TOLERANCE + { return Ok(()); } // Retrieve the solutions. let solutions = { // Determine the available capacity. - let capacity = N::MAX_SOLUTIONS.saturating_sub(num_unconfirmed); + let capacity = N::MAX_SOLUTIONS.saturating_sub(num_unconfirmed_solutions); // Acquire the lock on the queue. let mut queue = self.solutions_queue.lock(); // Determine the number of solutions to send. @@ -228,11 +325,17 @@ impl Consensus { }; // Iterate over the solutions. for solution in solutions.into_iter() { - let solution_id = solution.commitment(); + let solution_id = solution.id(); trace!("Adding unconfirmed solution '{}' to the memory pool...", fmt_id(solution_id)); // Send the unconfirmed solution to the primary. if let Err(e) = self.primary_sender().send_unconfirmed_solution(solution_id, Data::Object(solution)).await { - warn!("Failed to add unconfirmed solution '{}' to the memory pool - {e}", fmt_id(solution_id)); + // If the BFT is synced, then log the warning. + if self.bft.is_synced() { + // If error occurs after the first 10 blocks of the epoch, log it as a warning, otherwise ignore. + if self.ledger().latest_block_height() % N::NUM_BLOCKS_PER_EPOCH > 10 { + warn!("Failed to add unconfirmed solution '{}' to the memory pool - {e}", fmt_id(solution_id)) + }; + } } } Ok(()) @@ -240,6 +343,12 @@ impl Consensus { /// Adds the given unconfirmed transaction to the memory pool. 
pub async fn add_unconfirmed_transaction(&self, transaction: Transaction) -> Result<()> { + #[cfg(feature = "metrics")] + { + metrics::increment_gauge(metrics::consensus::UNCONFIRMED_TRANSACTIONS, 1f64); + let timestamp = snarkos_node_bft::helpers::now(); + self.transmissions_queue_timestamps.lock().insert(TransmissionID::Transaction(transaction.id()), timestamp); + } // Process the unconfirmed transaction. { let transaction_id = transaction.id(); @@ -259,28 +368,43 @@ impl Consensus { } // Add the transaction to the memory pool. trace!("Received unconfirmed transaction '{}' in the queue", fmt_id(transaction_id)); - if self.transactions_queue.lock().put(transaction_id, transaction).is_some() { + if transaction.is_deploy() { + if self.transactions_queue.lock().deployments.put(transaction_id, transaction).is_some() { + bail!("Transaction '{}' exists in the memory pool", fmt_id(transaction_id)); + } + } else if self.transactions_queue.lock().executions.put(transaction_id, transaction).is_some() { bail!("Transaction '{}' exists in the memory pool", fmt_id(transaction_id)); } } // If the memory pool of this node is full, return early. - let num_unconfirmed = self.num_unconfirmed_transmissions(); - if num_unconfirmed > MAX_TRANSMISSIONS_PER_BATCH { + let num_unconfirmed_transmissions = self.num_unconfirmed_transmissions(); + if num_unconfirmed_transmissions >= Primary::::MAX_TRANSMISSIONS_TOLERANCE { return Ok(()); } // Retrieve the transactions. let transactions = { // Determine the available capacity. - let capacity = MAX_TRANSMISSIONS_PER_BATCH.saturating_sub(num_unconfirmed); - // Acquire the lock on the queue. - let mut queue = self.transactions_queue.lock(); - // Determine the number of transactions to send. - let num_transactions = queue.len().min(capacity); - // Drain the solutions from the queue. - (0..num_transactions) - .filter_map(|_| queue.pop_lru().map(|(_, transaction)| transaction)) - .collect::>() + let capacity = Primary::::MAX_TRANSMISSIONS_TOLERANCE.saturating_sub(num_unconfirmed_transmissions); + // Acquire the lock on the transactions queue. + let mut tx_queue = self.transactions_queue.lock(); + // Determine the number of deployments to send. + let num_deployments = tx_queue.deployments.len().min(capacity).min(MAX_DEPLOYMENTS_PER_INTERVAL); + // Determine the number of executions to send. + let num_executions = tx_queue.executions.len().min(capacity.saturating_sub(num_deployments)); + // Create an iterator which will select interleaved deployments and executions within the capacity. + // Note: interleaving ensures we will never have consecutive invalid deployments blocking the queue. + let selector_iter = (0..num_deployments).map(|_| true).interleave((0..num_executions).map(|_| false)); + // Drain the transactions from the queue, interleaving deployments and executions. + selector_iter + .filter_map(|select_deployment| { + if select_deployment { + tx_queue.deployments.pop_lru().map(|(_, tx)| tx) + } else { + tx_queue.executions.pop_lru().map(|(_, tx)| tx) + } + }) + .collect_vec() }; // Iterate over the transactions. for transaction in transactions.into_iter() { @@ -290,7 +414,13 @@ impl Consensus { if let Err(e) = self.primary_sender().send_unconfirmed_transaction(transaction_id, Data::Object(transaction)).await { - warn!("Failed to add unconfirmed transaction '{}' to the memory pool - {e}", fmt_id(transaction_id)); + // If the BFT is synced, then log the warning. 
+ if self.bft.is_synced() { + warn!( + "Failed to add unconfirmed transaction '{}' to the memory pool - {e}", + fmt_id(transaction_id) + ); + } } } Ok(()) @@ -359,13 +489,18 @@ impl Consensus { let elapsed = std::time::Duration::from_secs((snarkos_node_bft::helpers::now() - start) as u64); let next_block_timestamp = next_block.header().metadata().timestamp(); let block_latency = next_block_timestamp - current_block_timestamp; + let proof_target = next_block.header().proof_target(); + let coinbase_target = next_block.header().coinbase_target(); + let cumulative_proof_target = next_block.header().cumulative_proof_target(); + + metrics::add_transmission_latency_metric(&self.transmissions_queue_timestamps, &next_block); - metrics::gauge(metrics::blocks::HEIGHT, next_block.height() as f64); - metrics::counter(metrics::blocks::TRANSACTIONS, next_block.transactions().len() as u64); - metrics::gauge(metrics::consensus::LAST_COMMITTED_ROUND, next_block.round() as f64); metrics::gauge(metrics::consensus::COMMITTED_CERTIFICATES, num_committed_certificates as f64); metrics::histogram(metrics::consensus::CERTIFICATE_COMMIT_LATENCY, elapsed.as_secs_f64()); metrics::histogram(metrics::consensus::BLOCK_LATENCY, block_latency as f64); + metrics::gauge(metrics::blocks::PROOF_TARGET, proof_target as f64); + metrics::gauge(metrics::blocks::COINBASE_TARGET, coinbase_target as f64); + metrics::gauge(metrics::blocks::CUMULATIVE_PROOF_TARGET, cumulative_proof_target as f64); } Ok(()) } @@ -392,9 +527,9 @@ impl Consensus { // Send the transmission to the primary. match (transmission_id, transmission) { (TransmissionID::Ratification, Transmission::Ratification) => return Ok(()), - (TransmissionID::Solution(commitment), Transmission::Solution(solution)) => { + (TransmissionID::Solution(solution_id), Transmission::Solution(solution)) => { // Send the solution to the primary. - self.primary_sender().tx_unconfirmed_solution.send((commitment, solution, callback)).await?; + self.primary_sender().tx_unconfirmed_solution.send((solution_id, solution, callback)).await?; } (TransmissionID::Transaction(transaction_id), Transmission::Transaction(transaction)) => { // Send the transaction to the primary. 
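The interleaved drain added to `add_unconfirmed_transaction` above is the subtle part of this hunk: deployments and executions are pulled in alternating slots, so a run of invalid deployments can never starve executions, and `MAX_DEPLOYMENTS_PER_INTERVAL` further limits deployments to one per drain. A standalone sketch of the selection order, using the same `itertools` interleave combinator the diff relies on (queue contents reduced to labels):

use itertools::Itertools;

// Reproduces the drain order: `true` slots pull a deployment, `false` slots an execution.
fn drain_order(num_deployments: usize, num_executions: usize) -> Vec<&'static str> {
    (0..num_deployments)
        .map(|_| true)
        .interleave((0..num_executions).map(|_| false))
        .map(|is_deployment| if is_deployment { "deploy" } else { "execute" })
        .collect()
}

fn main() {
    // With MAX_DEPLOYMENTS_PER_INTERVAL = 1 and three queued executions, at most
    // one deployment is drained per interval, ahead of the executions.
    assert_eq!(drain_order(1, 3), ["deploy", "execute", "execute", "execute"]);
}

Splitting the single LRU cache into separate deployment and execution caches is what makes this possible: each kind can be rate-limited independently instead of competing for one queue.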
diff --git a/node/metrics/Cargo.toml b/node/metrics/Cargo.toml index 22128ba622..36c757bed1 100644 --- a/node/metrics/Cargo.toml +++ b/node/metrics/Cargo.toml @@ -17,15 +17,27 @@ license = "Apache-2.0" edition = "2021" [features] +default = [ "rayon", "snarkvm/metrics"] metrics = [ "snarkvm/metrics" ] +serial = ["snarkvm/metrics"] [dependencies.metrics-exporter-prometheus] version = "0.13" +[dependencies.parking_lot] +version = "0.12" + +[dependencies.rayon] +version = "1" +optional = true + [dependencies.snarkvm] workspace = true features = [ "metrics" ] +[dependencies.time] +version = "0.3" + [dependencies.tokio] version = "1.28" features = [ "rt" ] diff --git a/node/metrics/snarkOS-grafana.json b/node/metrics/snarkOS-grafana.json index 87070f09a4..b708e5d2ba 100644 --- a/node/metrics/snarkOS-grafana.json +++ b/node/metrics/snarkOS-grafana.json @@ -1,41 +1,40 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.2.3" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], +{ "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } +], +"__elements": {}, +"__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.2.3" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } +], "annotations": { "list": [ { @@ -61,20 +60,450 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": null, + "id": 1, "links": [], "liveNow": false, "panels": [ { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 26, + "panels": [], + "title": "Aleo Network", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 49, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "snarkos_bft_height_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Block Height", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 60 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 48, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "avg(\n 1 / rate(snarkos_bft_height_total{}[1m])\n) < +inf", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Average Seconds/Block over last minute", + "transformations": [ + { + "id": "reduce", + "options": { + "reducers": [ + "mean" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 23, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "10.4.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "snarkos_bft_height_total", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Block Height", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "displayName": "Restricted Peers", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } 
+ ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "snarkos_router_restricted_total", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Restricted Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "displayName": "Total Transactions", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 + "h": 8, + "w": 12, + "x": 12, + "y": 9 }, - "id": 26, - "title": "Aleo Network", - "type": "row" + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "snarkos_blocks_transactions_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Total Transactions", + "type": "timeseries" }, { "datasource": { @@ -118,7 +547,7 @@ "mode": "off" } }, - "displayName": "Connected Peers", + "displayName": "TPS averaged over 5 minutes", "mappings": [], "thresholds": { "mode": "absolute", @@ -126,10 +555,6 @@ { "color": "green", "value": null - }, - { - "color": "red", - "value": 80 } ] } @@ -140,9 +565,9 @@ "h": 8, "w": 12, "x": 0, - "y": 1 + "y": 17 }, - "id": 27, + "id": 30, "options": { "legend": { "calcs": [], @@ -161,14 +586,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "snarkos_router_connected_total", + "editorMode": "builder", + "expr": "sum(rate(snarkos_blocks_transactions_total[5m]))", "legendFormat": "__auto", "range": true, "refId": "A" } ], - "title": "Connected Peers", + "title": "TPS", "type": "timeseries" }, { @@ -213,7 +638,6 @@ "mode": "off" } }, - "displayName": "Candidate Peers", "mappings": [], "thresholds": { "mode": "absolute", @@ -235,9 +659,9 @@ "h": 8, "w": 12, "x": 12, - "y": 1 + "y": 17 }, - "id": 28, + "id": 53, "options": { "legend": { "calcs": [], @@ -246,6 +670,7 @@ "showLegend": true }, "tooltip": { + "maxHeight": 600, "mode": "single", "sort": "none" } @@ -256,14 +681,46 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "disableTextWrap": false, 
"editorMode": "code", - "expr": "snarkos_router_candidate_total", - "legendFormat": "__auto", + "expr": "max(snarkos_blocks_coinbase_target)", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "interval": "", + "legendFormat": "Coinbase Target", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "max(snarkos_blocks_proof_target)", + "hide": false, + "instant": false, + "legendFormat": "Proof Target", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "max(snarkos_blocks_cumulative_proof_target)", + "hide": false, + "instant": false, + "legendFormat": "Cumulative Proof Target", + "range": true, + "refId": "C" } ], - "title": "Candidate Peers", + "title": "Difficulty", "type": "timeseries" }, { @@ -308,7 +765,7 @@ "mode": "off" } }, - "displayName": "Restricted Peers", + "displayName": "Total Transmissions", "mappings": [], "thresholds": { "mode": "absolute", @@ -316,10 +773,6 @@ { "color": "green", "value": null - }, - { - "color": "red", - "value": 80 } ] } @@ -329,10 +782,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 9 + "x": 12, + "y": 25 }, - "id": 29, + "id": 42, "options": { "legend": { "calcs": [], @@ -351,14 +804,19 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "snarkos_router_restricted_total", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "snarkos_blocks_solutions_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, "legendFormat": "__auto", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false } ], - "title": "Restricted Peers", + "title": "Total Solutions", "type": "timeseries" }, { @@ -369,8 +827,41 @@ "fieldConfig": { "defaults": { "color": { - "mode": "thresholds" + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } }, + "displayName": "Total Stake", "mappings": [], "thresholds": { "mode": "absolute", @@ -378,6 +869,10 @@ { "color": "green", "value": null + }, + { + "color": "red", + "value": 80 } ] } @@ -387,41 +882,42 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 9 + "x": 0, + "y": 25 }, - "id": 23, + "id": 31, "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - "textMode": "auto", - "wideLayout": true + "tooltip": { + "mode": "single", + "sort": "none" + } }, - "pluginVersion": "10.2.3", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "snarkos_blocks_height_total", + "disableTextWrap": false, + "editorMode": "builder", + 
"expr": "snarkvm_ledger_committee_total_stake", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, "legendFormat": "__auto", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false } ], - "title": "Block Height", - "type": "stat" + "title": "Total Stake", + "type": "timeseries" }, { "datasource": { @@ -465,7 +961,6 @@ "mode": "off" } }, - "displayName": "TPS averaged over 5 minutes", "mappings": [], "thresholds": { "mode": "absolute", @@ -473,6 +968,10 @@ { "color": "green", "value": null + }, + { + "color": "red", + "value": 80 } ] } @@ -482,10 +981,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 17 + "x": 12, + "y": 25 }, - "id": 30, + "id": 47, "options": { "legend": { "calcs": [], @@ -504,14 +1003,19 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "disableTextWrap": false, "editorMode": "builder", - "expr": "sum(rate(snarkos_blocks_transactions_total[5m]))", + "expr": "snarkos_bft_connected_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, "legendFormat": "__auto", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false } ], - "title": "TPS", + "title": "SnarkOS BFT Connected Total", "type": "timeseries" }, { @@ -556,7 +1060,7 @@ "mode": "off" } }, - "displayName": "Total Transactions", + "displayName": "Connected Peers", "mappings": [], "thresholds": { "mode": "absolute", @@ -577,10 +1081,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 17 + "x": 0, + "y": 33 }, - "id": 36, + "id": 27, "options": { "legend": { "calcs": [], @@ -599,19 +1103,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "snarkos_blocks_transactions_total", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, + "editorMode": "code", + "expr": "snarkos_router_connected_total", "legendFormat": "__auto", "range": true, - "refId": "A", - "useBackend": false + "refId": "A" } ], - "title": "Total Transactions", + "title": "Connected Peers", "type": "timeseries" }, { @@ -656,7 +1155,7 @@ "mode": "off" } }, - "displayName": "Total Stake", + "displayName": "Candidate Peers", "mappings": [], "thresholds": { "mode": "absolute", @@ -677,10 +1176,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 25 + "x": 12, + "y": 33 }, - "id": 31, + "id": 28, "options": { "legend": { "calcs": [], @@ -699,19 +1198,14 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "snarkvm_ledger_committee_total_stake", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, + "editorMode": "code", + "expr": "snarkos_router_candidate_total", "legendFormat": "__auto", "range": true, - "refId": "A", - "useBackend": false + "refId": "A" } ], - "title": "Total Stake", + "title": "Candidate Peers", "type": "timeseries" }, { @@ -720,7 +1214,7 @@ "h": 1, "w": 24, "x": 0, - "y": 33 + "y": 41 }, "id": 18, "panels": [], @@ -752,9 +1246,9 @@ }, "gridPos": { "h": 8, - "w": 6, + "w": 8, "x": 0, - "y": 34 + "y": 42 }, "id": 16, "options": { @@ -769,10 +1263,11 @@ "fields": "", "values": false }, + "showPercentChange": false, "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.2.3", + "pluginVersion": "10.4.1", "targets": [ { "datasource": { @@ -818,9 +1313,9 @@ }, "gridPos": { "h": 8, - "w": 6, - "x": 6, - "y": 34 + "w": 8, + "x": 8, + "y": 42 }, "id": 25, "options": { @@ -835,10 +1330,11 @@ "fields": "", "values": false }, + "showPercentChange": false, "textMode": 
"auto", "wideLayout": true }, - "pluginVersion": "10.2.3", + "pluginVersion": "10.4.1", "targets": [ { "datasource": { @@ -846,7 +1342,7 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "snarkos_consensus_last_committed_round", + "expr": "snarkos_bft_last_committed_round", "legendFormat": "__auto", "range": true, "refId": "A" @@ -880,11 +1376,11 @@ }, "gridPos": { "h": 8, - "w": 6, - "x": 12, - "y": 34 + "w": 8, + "x": 16, + "y": 42 }, - "id": 12, + "id": 24, "options": { "colorMode": "value", "graphMode": "area", @@ -897,24 +1393,25 @@ "fields": "", "values": false }, + "showPercentChange": false, "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.2.3", + "pluginVersion": "10.4.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "builder", - "expr": "snarkos_consensus_committed_certificates_total", + "editorMode": "code", + "expr": "snarkos_bft_leaders_elected_total", "legendFormat": "__auto", "range": true, "refId": "A" } ], - "title": "Committed Certificates", + "title": "Elected Leaders", "type": "stat" }, { @@ -942,11 +1439,11 @@ }, "gridPos": { "h": 8, - "w": 6, - "x": 18, - "y": 34 + "w": 7, + "x": 4, + "y": 50 }, - "id": 24, + "id": 40, "options": { "colorMode": "value", "graphMode": "area", @@ -959,24 +1456,93 @@ "fields": "", "values": false }, + "showPercentChange": false, "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.2.3", + "pluginVersion": "10.4.1", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", - "expr": "snarkos_bft_leaders_elected_total", + "disableTextWrap": false, + "editorMode": "builder", + "expr": "snarkos_bft_primary_certified_batches", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Certified Batches", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 7, + "x": 11, + "y": 50 + }, + "id": 12, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "snarkos_consensus_committed_certificates_total", "legendFormat": "__auto", "range": true, "refId": "A" } ], - "title": "Elected Leaders", + "title": "Committed Certificates", "type": "stat" }, { @@ -985,7 +1551,7 @@ "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 58 }, "id": 10, "panels": [], @@ -1055,9 +1621,9 @@ }, "gridPos": { "h": 8, - "w": 9, + "w": 12, "x": 0, - "y": 43 + "y": 59 }, "id": 8, "options": { @@ -1179,9 +1745,9 @@ }, "gridPos": { "h": 8, - "w": 9, - "x": 9, - "y": 43 + "w": 12, + "x": 12, + "y": 59 }, "id": 14, "options": { @@ -1260,7 +1826,7 @@ "mode": "off" } }, - "displayName": "Average Block Latency", + "displayName": "Total Transactions", "mappings": [], "thresholds": { "mode": "absolute", @@ -1268,24 +1834,19 @@ { "color": "green", 
"value": null - }, - { - "color": "red", - "value": 80 } ] - }, - "unit": "s" + } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 6, - "y": 51 + "x": 0, + "y": 67 }, - "id": 38, + "id": 43, "options": { "legend": { "calcs": [], @@ -1306,7 +1867,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(snarkos_consensus_block_latency_secs)", + "expr": "snarkos_consensus_unconfirmed_transactions_total", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -1316,22 +1877,9 @@ "useBackend": false } ], - "title": "Block Latency", + "title": "Total Unconfirmed Transactions", "type": "timeseries" }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 59 - }, - "id": 4, - "panels": [], - "title": "Network", - "type": "row" - }, { "datasource": { "type": "prometheus", @@ -1374,7 +1922,7 @@ "mode": "off" } }, - "displayName": "Queue Depth", + "displayName": "Total Solutions", "mappings": [], "thresholds": { "mode": "absolute", @@ -1382,10 +1930,6 @@ { "color": "green", "value": null - }, - { - "color": "red", - "value": 80 } ] } @@ -1395,10 +1939,10 @@ "gridPos": { "h": 8, "w": 12, - "x": 6, - "y": 60 + "x": 12, + "y": 67 }, - "id": 37, + "id": 44, "options": { "legend": { "calcs": [], @@ -1419,7 +1963,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "snarkos_tcp_tasks_total", + "expr": "snarkos_consensus_unconfirmed_solutions_total", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -1429,7 +1973,7 @@ "useBackend": false } ], - "title": "TCP Queue Depth", + "title": "Total Unconfirmed Solutions", "type": "timeseries" }, { @@ -1474,21 +2018,17 @@ "mode": "off" } }, - "displayName": "average encrypt time", + "displayName": "Total Transmissions", "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" - }, - { - "color": "red", - "value": 80 + "color": "green", + "value": null } ] - }, - "unit": "µs" + } }, "overrides": [] }, @@ -1496,9 +2036,9 @@ "h": 8, "w": 12, "x": 0, - "y": 68 + "y": 75 }, - "id": 32, + "id": 45, "options": { "legend": { "calcs": [], @@ -1519,7 +2059,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(snarkos_tcp_noise_codec_encryption_micros)", + "expr": "snarkos_consensus_unconfirmed_transmissions_total", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -1529,7 +2069,7 @@ "useBackend": false } ], - "title": "Encryption Time", + "title": "Total Unconfirmed Transmissions", "type": "timeseries" }, { @@ -1574,21 +2114,21 @@ "mode": "off" } }, - "displayName": "average decrypt time", + "displayName": "Average Block Latency", "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "µs" + } }, "overrides": [] }, @@ -1596,9 +2136,9 @@ "h": 8, "w": 12, "x": 12, - "y": 68 + "y": 75 }, - "id": 33, + "id": 38, "options": { "legend": { "calcs": [], @@ -1619,121 +2159,31 @@ }, "disableTextWrap": false, "editorMode": "builder", - "exemplar": false, - "expr": "avg(snarkos_tcp_noise_codec_decryption_micros)", - "format": "time_series", + "expr": "avg(snarkos_consensus_block_latency_secs)", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, - "interval": "", "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false } ], - "title": "Decryption Time", + "title": "Block Latency", "type": "timeseries" }, { - "datasource": { - "type": "prometheus", - 
"uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "displayName": "Average Size", - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "decbytes" - }, - "overrides": [] - }, + "collapsed": false, "gridPos": { - "h": 8, - "w": 12, + "h": 1, + "w": 24, "x": 0, - "y": 76 - }, - "id": 34, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } + "y": 83 }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "avg(snarkos_tcp_noise_codec_encryption_size)", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Encryption Size", - "type": "timeseries" + "id": 4, + "panels": [], + "title": "Network", + "type": "row" }, { "datasource": { @@ -1777,31 +2227,31 @@ "mode": "off" } }, - "displayName": "Average Size", + "displayName": "Queue Depth", "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", "value": 80 } ] - }, - "unit": "decbytes" + } }, "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 76 + "x": 6, + "y": 84 }, - "id": 35, + "id": 37, "options": { "legend": { "calcs": [], @@ -1822,7 +2272,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "avg(snarkos_tcp_noise_codec_decryption_size)", + "expr": "snarkos_tcp_tasks_total", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -1832,7 +2282,7 @@ "useBackend": false } ], - "title": "Decryption Size", + "title": "TCP Queue Depth", "type": "timeseries" } ], @@ -1866,7 +2316,6 @@ }, "timezone": "", "title": "snarkOS", - "uid": "ahTJm4-4k", - "version": 1, + "version": 3, "weekStart": "" } \ No newline at end of file diff --git a/node/metrics/src/lib.rs b/node/metrics/src/lib.rs index 0543a64eea..f25f53caef 100644 --- a/node/metrics/src/lib.rs +++ b/node/metrics/src/lib.rs @@ -16,9 +16,27 @@ mod names; // Expose the names at the crate level for easy access. pub use names::*; + // Re-export the snarkVM metrics. pub use snarkvm::metrics::*; +#[cfg(not(feature = "serial"))] +use rayon::prelude::*; + +use parking_lot::Mutex; +use snarkvm::{ + ledger::narwhal::TransmissionID, + prelude::{cfg_iter, Block, Network}, +}; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; +use time::OffsetDateTime; + /// Initializes the metrics and returns a handle to the task running the metrics exporter. pub fn initialize_metrics() { // Build the Prometheus exporter. 
@@ -38,3 +56,93 @@ pub fn initialize_metrics() { register_histogram(name); } } + +pub fn update_block_metrics<N: Network>(block: &Block<N>) { + use snarkvm::ledger::ConfirmedTransaction; + + let accepted_deploy = AtomicUsize::new(0); + let accepted_execute = AtomicUsize::new(0); + let rejected_deploy = AtomicUsize::new(0); + let rejected_execute = AtomicUsize::new(0); + + // Add each transaction to the corresponding atomic counter based on its enum variant. + cfg_iter!(block.transactions()).for_each(|tx| match tx { + ConfirmedTransaction::AcceptedDeploy(_, _, _) => { + accepted_deploy.fetch_add(1, Ordering::Relaxed); + } + ConfirmedTransaction::AcceptedExecute(_, _, _) => { + accepted_execute.fetch_add(1, Ordering::Relaxed); + } + ConfirmedTransaction::RejectedDeploy(_, _, _, _) => { + rejected_deploy.fetch_add(1, Ordering::Relaxed); + } + ConfirmedTransaction::RejectedExecute(_, _, _, _) => { + rejected_execute.fetch_add(1, Ordering::Relaxed); + } + }); + + increment_gauge(blocks::ACCEPTED_DEPLOY, accepted_deploy.load(Ordering::Relaxed) as f64); + increment_gauge(blocks::ACCEPTED_EXECUTE, accepted_execute.load(Ordering::Relaxed) as f64); + increment_gauge(blocks::REJECTED_DEPLOY, rejected_deploy.load(Ordering::Relaxed) as f64); + increment_gauge(blocks::REJECTED_EXECUTE, rejected_execute.load(Ordering::Relaxed) as f64); + + // Update aborted transactions and solutions. + increment_gauge(blocks::ABORTED_TRANSACTIONS, block.aborted_transaction_ids().len() as f64); + increment_gauge(blocks::ABORTED_SOLUTIONS, block.aborted_solution_ids().len() as f64); +} + +pub fn add_transmission_latency_metric<N: Network>( + transmissions_queue_timestamps: &Arc<Mutex<HashMap<TransmissionID<N>, i64>>>, + block: &Block<N>, +) { + const AGE_THRESHOLD_SECONDS: i32 = 30 * 60; // 30 minutes is the stale-transmission threshold. + + // Retrieve the solution IDs. + let solution_ids: std::collections::HashSet<_> = + block.solutions().solution_ids().chain(block.aborted_solution_ids()).collect(); + + // Retrieve the transaction IDs. + let transaction_ids: std::collections::HashSet<_> = + block.transaction_ids().chain(block.aborted_transaction_ids()).collect(); + + let mut transmission_queue_timestamps = transmissions_queue_timestamps.lock(); + let ts_now = OffsetDateTime::now_utc().unix_timestamp(); + + // Determine which keys to remove. + let keys_to_remove = cfg_iter!(transmission_queue_timestamps) + .flat_map(|(key, timestamp)| { + let elapsed_time = std::time::Duration::from_secs((ts_now - *timestamp) as u64); + + if elapsed_time.as_secs() > AGE_THRESHOLD_SECONDS as u64 { + // This entry is stale; remove it from the transmission queue and record it as a stale transmission. + increment_counter(consensus::STALE_UNCONFIRMED_TRANSMISSIONS); + Some(*key) + } else { + let transmission_type = match key { + TransmissionID::Solution(solution_id) if solution_ids.contains(solution_id) => Some("solution"), + TransmissionID::Transaction(transaction_id) if transaction_ids.contains(transaction_id) => { + Some("transaction") + } + _ => None, + }; + + if let Some(transmission_type_string) = transmission_type { + histogram_label( + consensus::TRANSMISSION_LATENCY, + "transmission_type", + transmission_type_string.to_owned(), + elapsed_time.as_secs_f64(), + ); + Some(*key) + } else { + None + } + } + }) + .collect::<Vec<_>>(); + + // Remove keys of stale or seen transmissions.
+ for key in keys_to_remove { + transmission_queue_timestamps.remove(&key); + } +} diff --git a/node/metrics/src/names.rs b/node/metrics/src/names.rs index 6f244e5f40..7fcb8e2fa8 100644 --- a/node/metrics/src/names.rs +++ b/node/metrics/src/names.rs @@ -12,32 +12,39 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub(super) const COUNTER_NAMES: [&str; 1] = [bft::LEADERS_ELECTED]; +pub(super) const COUNTER_NAMES: [&str; 2] = [bft::LEADERS_ELECTED, consensus::STALE_UNCONFIRMED_TRANSMISSIONS]; -pub(super) const GAUGE_NAMES: [&str; 12] = [ +pub(super) const GAUGE_NAMES: [&str; 26] = [ bft::CONNECTED, bft::CONNECTING, bft::LAST_STORED_ROUND, bft::PROPOSAL_ROUND, - blocks::HEIGHT, + bft::CERTIFIED_BATCHES, + bft::HEIGHT, + bft::LAST_COMMITTED_ROUND, + bft::IS_SYNCED, + blocks::SOLUTIONS, blocks::TRANSACTIONS, + blocks::ACCEPTED_DEPLOY, + blocks::ACCEPTED_EXECUTE, + blocks::REJECTED_DEPLOY, + blocks::REJECTED_EXECUTE, + blocks::ABORTED_TRANSACTIONS, + blocks::ABORTED_SOLUTIONS, + blocks::PROOF_TARGET, + blocks::COINBASE_TARGET, + blocks::CUMULATIVE_PROOF_TARGET, consensus::COMMITTED_CERTIFICATES, - consensus::LAST_COMMITTED_ROUND, + consensus::UNCONFIRMED_SOLUTIONS, + consensus::UNCONFIRMED_TRANSACTIONS, router::CONNECTED, router::CANDIDATE, router::RESTRICTED, tcp::TCP_TASKS, ]; -pub(super) const HISTOGRAM_NAMES: [&str; 7] = [ - bft::COMMIT_ROUNDS_LATENCY, - consensus::CERTIFICATE_COMMIT_LATENCY, - consensus::BLOCK_LATENCY, - tcp::NOISE_CODEC_ENCRYPTION_TIME, - tcp::NOISE_CODEC_DECRYPTION_TIME, - tcp::NOISE_CODEC_ENCRYPTION_SIZE, - tcp::NOISE_CODEC_DECRYPTION_SIZE, -]; +pub(super) const HISTOGRAM_NAMES: [&str; 3] = + [bft::COMMIT_ROUNDS_LATENCY, consensus::CERTIFICATE_COMMIT_LATENCY, consensus::BLOCK_LATENCY]; pub mod bft { pub const COMMIT_ROUNDS_LATENCY: &str = "snarkos_bft_commit_rounds_latency_secs"; // <-- This one doesn't even make sense. 
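A minimal sketch of the pruning rule `add_transmission_latency_metric` applies above (illustrative only: plain `u64` keys stand in for `TransmissionID<N>`, and the metric calls are stubbed out as comments):

```rust
use std::collections::{HashMap, HashSet};

/// Illustrative only: partition the queued transmissions the way the helper
/// above does. Entries older than the threshold are dropped as stale; entries
/// that appear in the new block are dropped after their queue-to-commit
/// latency is recorded; everything else stays queued.
fn prune_queue(queue: &mut HashMap<u64, i64>, in_block: &HashSet<u64>, ts_now: i64) {
    const AGE_THRESHOLD_SECONDS: i64 = 30 * 60;
    queue.retain(|id, enqueued_at| {
        let elapsed = ts_now - *enqueued_at;
        if elapsed > AGE_THRESHOLD_SECONDS {
            false // Stale: would bump `consensus::STALE_UNCONFIRMED_TRANSMISSIONS`.
        } else if in_block.contains(id) {
            false // Committed: would record `elapsed` in `consensus::TRANSMISSION_LATENCY`.
        } else {
            true // Still pending: keep it for the next block.
        }
    });
}
```

The real helper collects the keys first and removes them afterwards because it iterates the locked map with `cfg_iter!` (potentially in parallel); `retain` is the single-threaded equivalent.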
@@ -46,18 +53,34 @@ pub mod bft { pub const LAST_STORED_ROUND: &str = "snarkos_bft_last_stored_round"; pub const LEADERS_ELECTED: &str = "snarkos_bft_leaders_elected_total"; pub const PROPOSAL_ROUND: &str = "snarkos_bft_primary_proposal_round"; + pub const CERTIFIED_BATCHES: &str = "snarkos_bft_primary_certified_batches"; + pub const HEIGHT: &str = "snarkos_bft_height_total"; + pub const LAST_COMMITTED_ROUND: &str = "snarkos_bft_last_committed_round"; + pub const IS_SYNCED: &str = "snarkos_bft_is_synced"; } pub mod blocks { - pub const HEIGHT: &str = "snarkos_blocks_height_total"; pub const TRANSACTIONS: &str = "snarkos_blocks_transactions_total"; + pub const SOLUTIONS: &str = "snarkos_blocks_solutions_total"; + pub const ACCEPTED_DEPLOY: &str = "snarkos_blocks_accepted_deploy"; + pub const ACCEPTED_EXECUTE: &str = "snarkos_blocks_accepted_execute"; + pub const REJECTED_DEPLOY: &str = "snarkos_blocks_rejected_deploy"; + pub const REJECTED_EXECUTE: &str = "snarkos_blocks_rejected_execute"; + pub const ABORTED_TRANSACTIONS: &str = "snarkos_blocks_aborted_transactions"; + pub const ABORTED_SOLUTIONS: &str = "snarkos_blocks_aborted_solutions"; + pub const PROOF_TARGET: &str = "snarkos_blocks_proof_target"; + pub const COINBASE_TARGET: &str = "snarkos_blocks_coinbase_target"; + pub const CUMULATIVE_PROOF_TARGET: &str = "snarkos_blocks_cumulative_proof_target"; } pub mod consensus { pub const CERTIFICATE_COMMIT_LATENCY: &str = "snarkos_consensus_certificate_commit_latency_secs"; pub const COMMITTED_CERTIFICATES: &str = "snarkos_consensus_committed_certificates_total"; - pub const LAST_COMMITTED_ROUND: &str = "snarkos_consensus_last_committed_round"; pub const BLOCK_LATENCY: &str = "snarkos_consensus_block_latency_secs"; + pub const UNCONFIRMED_TRANSACTIONS: &str = "snarkos_consensus_unconfirmed_transactions_total"; + pub const UNCONFIRMED_SOLUTIONS: &str = "snarkos_consensus_unconfirmed_solutions_total"; + pub const TRANSMISSION_LATENCY: &str = "snarkos_consensus_transmission_latency"; + pub const STALE_UNCONFIRMED_TRANSMISSIONS: &str = "snarkos_consensus_stale_unconfirmed_transmissions"; } pub mod router { @@ -67,9 +90,5 @@ } pub mod tcp { - pub const NOISE_CODEC_ENCRYPTION_TIME: &str = "snarkos_tcp_noise_codec_encryption_micros"; - pub const NOISE_CODEC_DECRYPTION_TIME: &str = "snarkos_tcp_noise_codec_decryption_micros"; - pub const NOISE_CODEC_ENCRYPTION_SIZE: &str = "snarkos_tcp_noise_codec_encryption_size"; - pub const NOISE_CODEC_DECRYPTION_SIZE: &str = "snarkos_tcp_noise_codec_decryption_size"; pub const TCP_TASKS: &str = "snarkos_tcp_tasks_total"; } diff --git a/node/rest/src/lib.rs index ea1f76d125..f3195063ca 100644 --- a/node/rest/src/lib.rs +++ b/node/rest/src/lib.rs @@ -112,76 +112,90 @@ impl<N: Network, R: Routing<N>> Rest<N, R> { GovernorConfigBuilder::default() .per_second(1) .burst_size(rest_rps) - .error_handler(|error| Response::new(error.to_string())) + .error_handler(|error| Response::new(error.to_string().into())) .finish() .expect("Couldn't set up rate limiting for the REST server!"), ); + // Get the network being used. + let network = match N::ID { + snarkvm::console::network::MainnetV0::ID => "mainnet", + snarkvm::console::network::TestnetV0::ID => "testnet", + unknown_id => { + eprintln!("Unknown network ID ({unknown_id})"); + return; + } + }; +
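Every route that used to be hard-coded under `/testnet3/...` is now built from the `network` string derived above. A toy sketch of the effect (illustrative only; `route_path` is a hypothetical helper, not part of this PR):

```rust
// Illustrative only: how the `{network}` prefix is spliced into route paths.
fn route_path(network: &str, suffix: &str) -> String {
    format!("/{network}/{suffix}")
}

fn main() {
    // A MainnetV0 node now serves the endpoints that previously lived under `/testnet3/...`:
    assert_eq!(route_path("mainnet", "block/height/latest"), "/mainnet/block/height/latest");
    // ...while a TestnetV0 node serves the same handlers under `/testnet/...`.
    assert_eq!(route_path("testnet", "latest/committee"), "/testnet/latest/committee");
}
```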
- .route("/testnet3/node/address", get(Self::get_node_address)) + .route(&format!("/{network}/node/address"), get(Self::get_node_address)) .route_layer(middleware::from_fn(auth_middleware)) // ----------------- DEPRECATED ROUTES ----------------- // The following `GET ../latest/..` routes will be removed before mainnet. // Please refer to the recommended routes for each endpoint: - // Deprecated: use `/testnet3/block/height/latest` instead. - .route("/testnet3/latest/height", get(Self::latest_height)) - // Deprecated: use `/testnet3/block/hash/latest` instead. - .route("/testnet3/latest/hash", get(Self::latest_hash)) - // Deprecated: use `/testnet3/latest/block/height` instead. - .route("/testnet3/latest/block", get(Self::latest_block)) - // Deprecated: use `/testnet3/stateRoot/latest` instead. - .route("/testnet3/latest/stateRoot", get(Self::latest_state_root)) - // Deprecated: use `/testnet3/committee/latest` instead. - .route("/testnet3/latest/committee", get(Self::latest_committee)) + // Deprecated: use `//block/height/latest` instead. + .route(&format!("/{network}/latest/height"), get(Self::latest_height)) + // Deprecated: use `//block/hash/latest` instead. + .route(&format!("/{network}/latest/hash"), get(Self::latest_hash)) + // Deprecated: use `//latest/block/height` instead. + .route(&format!("/{network}/latest/block"), get(Self::latest_block)) + // Deprecated: use `//stateRoot/latest` instead. + .route(&format!("/{network}/latest/stateRoot"), get(Self::latest_state_root)) + // Deprecated: use `//committee/latest` instead. + .route(&format!("/{network}/latest/committee"), get(Self::latest_committee)) // ------------------------------------------------------ // GET ../block/.. - .route("/testnet3/block/height/latest", get(Self::get_block_height_latest)) - .route("/testnet3/block/hash/latest", get(Self::get_block_hash_latest)) - .route("/testnet3/block/latest", get(Self::get_block_latest)) - .route("/testnet3/block/:height_or_hash", get(Self::get_block)) + .route(&format!("/{network}/block/height/latest"), get(Self::get_block_height_latest)) + .route(&format!("/{network}/block/hash/latest"), get(Self::get_block_hash_latest)) + .route(&format!("/{network}/block/latest"), get(Self::get_block_latest)) + .route(&format!("/{network}/block/:height_or_hash"), get(Self::get_block)) // The path param here is actually only the height, but the name must match the route // above, otherwise there'll be a conflict at runtime. - .route("/testnet3/block/:height_or_hash/transactions", get(Self::get_block_transactions)) + .route(&format!("/{network}/block/:height_or_hash/transactions"), get(Self::get_block_transactions)) // GET and POST ../transaction/.. - .route("/testnet3/transaction/:id", get(Self::get_transaction)) - .route("/testnet3/transaction/confirmed/:id", get(Self::get_confirmed_transaction)) - .route("/testnet3/transaction/broadcast", post(Self::transaction_broadcast)) + .route(&format!("/{network}/transaction/:id"), get(Self::get_transaction)) + .route(&format!("/{network}/transaction/confirmed/:id"), get(Self::get_confirmed_transaction)) + .route(&format!("/{network}/transaction/broadcast"), post(Self::transaction_broadcast)) // POST ../solution/broadcast - .route("/testnet3/solution/broadcast", post(Self::solution_broadcast)) + .route(&format!("/{network}/solution/broadcast"), post(Self::solution_broadcast)) // GET ../find/.. 
- .route("/testnet3/find/blockHash/:tx_id", get(Self::find_block_hash)) - .route("/testnet3/find/transactionID/deployment/:program_id", get(Self::find_transaction_id_from_program_id)) - .route("/testnet3/find/transactionID/:transition_id", get(Self::find_transaction_id_from_transition_id)) - .route("/testnet3/find/transitionID/:input_or_output_id", get(Self::find_transition_id)) + .route(&format!("/{network}/find/blockHash/:tx_id"), get(Self::find_block_hash)) + .route(&format!("/{network}/find/blockHeight/:state_root"), get(Self::find_block_height_from_state_root)) + .route(&format!("/{network}/find/transactionID/deployment/:program_id"), get(Self::find_transaction_id_from_program_id)) + .route(&format!("/{network}/find/transactionID/:transition_id"), get(Self::find_transaction_id_from_transition_id)) + .route(&format!("/{network}/find/transitionID/:input_or_output_id"), get(Self::find_transition_id)) // GET ../peers/.. - .route("/testnet3/peers/count", get(Self::get_peers_count)) - .route("/testnet3/peers/all", get(Self::get_peers_all)) - .route("/testnet3/peers/all/metrics", get(Self::get_peers_all_metrics)) + .route(&format!("/{network}/peers/count"), get(Self::get_peers_count)) + .route(&format!("/{network}/peers/all"), get(Self::get_peers_all)) + .route(&format!("/{network}/peers/all/metrics"), get(Self::get_peers_all_metrics)) // GET ../program/.. - .route("/testnet3/program/:id", get(Self::get_program)) - .route("/testnet3/program/:id/mappings", get(Self::get_mapping_names)) - .route("/testnet3/program/:id/mapping/:name/:key", get(Self::get_mapping_value)) + .route(&format!("/{network}/program/:id"), get(Self::get_program)) + .route(&format!("/{network}/program/:id/mappings"), get(Self::get_mapping_names)) + .route(&format!("/{network}/program/:id/mapping/:name/:key"), get(Self::get_mapping_value)) // GET misc endpoints. - .route("/testnet3/blocks", get(Self::get_blocks)) - .route("/testnet3/height/:hash", get(Self::get_height)) - .route("/testnet3/memoryPool/transmissions", get(Self::get_memory_pool_transmissions)) - .route("/testnet3/memoryPool/solutions", get(Self::get_memory_pool_solutions)) - .route("/testnet3/memoryPool/transactions", get(Self::get_memory_pool_transactions)) - .route("/testnet3/statePath/:commitment", get(Self::get_state_path_for_commitment)) - .route("/testnet3/stateRoot/latest", get(Self::get_state_root_latest)) - .route("/testnet3/committee/latest", get(Self::get_committee_latest)) + .route(&format!("/{network}/blocks"), get(Self::get_blocks)) + .route(&format!("/{network}/height/:hash"), get(Self::get_height)) + .route(&format!("/{network}/memoryPool/transmissions"), get(Self::get_memory_pool_transmissions)) + .route(&format!("/{network}/memoryPool/solutions"), get(Self::get_memory_pool_solutions)) + .route(&format!("/{network}/memoryPool/transactions"), get(Self::get_memory_pool_transactions)) + .route(&format!("/{network}/statePath/:commitment"), get(Self::get_state_path_for_commitment)) + .route(&format!("/{network}/stateRoot/latest"), get(Self::get_state_root_latest)) + .route(&format!("/{network}/stateRoot/:height"), get(Self::get_state_root)) + .route(&format!("/{network}/committee/latest"), get(Self::get_committee_latest)) + .route(&format!("/{network}/committee/:height"), get(Self::get_committee)) + .route(&format!("/{network}/delegators/:validator"), get(Self::get_delegators_for_validator)) // Pass in `Rest` to make things convenient. 
.with_state(self.clone()) @@ -191,8 +205,8 @@ impl<N: Network, R: Routing<N>> Rest<N, R> { .layer(middleware::from_fn(log_middleware)) // Enable CORS. .layer(cors) - // Cap body size at 10MB. - .layer(DefaultBodyLimit::max(10 * 1024 * 1024)) + // Cap body size at 512KiB. + .layer(DefaultBodyLimit::max(512 * 1024)) .layer(GovernorLayer { // We can leak this because it is created only once and it persists. config: Box::leak(governor_config), @@ -217,3 +231,13 @@ async fn log_middleware( Ok(next.run(request).await) } + +/// Formats an ID into a truncated identifier (for logging purposes). +pub fn fmt_id(id: impl ToString) -> String { + let id = id.to_string(); + let mut formatted_id = id.chars().take(16).collect::<String>(); + if id.chars().count() > 16 { + formatted_id.push_str(".."); + } + formatted_id +} diff --git a/node/rest/src/routes.rs index ed28b52496..20ef762911 100644 --- a/node/rest/src/routes.rs +++ b/node/rest/src/routes.rs @@ -13,10 +13,10 @@ // limitations under the License. use super::*; -use snarkos_node_router::messages::UnconfirmedSolution; +use snarkos_node_router::{messages::UnconfirmedSolution, SYNC_LENIENCY}; use snarkvm::{ - ledger::coinbase::ProverSolution, - prelude::{block::Transaction, Identifier, Plaintext}, + ledger::puzzle::Solution, + prelude::{block::Transaction, Address, Identifier, LimitedWriter, Plaintext, ToBytes}, }; use indexmap::IndexMap; @@ -45,59 +45,59 @@ impl<N: Network, R: Routing<N>> Rest<N, R> { // Please use the recommended alternatives when implementing new features or refactoring. // Deprecated: Use `get_block_height_latest` instead. - // GET /testnet3/latest/height + // GET /<network>/latest/height pub(crate) async fn latest_height(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_height()) } // Deprecated: Use `get_block_hash_latest` instead. - // GET /testnet3/latest/hash + // GET /<network>/latest/hash pub(crate) async fn latest_hash(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_hash()) } // Deprecated: Use `get_block_latest` instead. - // GET /testnet3/latest/block + // GET /<network>/latest/block pub(crate) async fn latest_block(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_block()) } // Deprecated: Use `get_state_root_latest` instead. - // GET /testnet3/latest/stateRoot + // GET /<network>/latest/stateRoot pub(crate) async fn latest_state_root(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_state_root()) }
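One behavioural note on the `lib.rs` hunk above: the request-body cap drops from 10 MB to 512 KiB. A minimal sketch of the same layering (assumes axum 0.7; `broadcast` is a placeholder handler, not the PR's):

```rust
use axum::{extract::DefaultBodyLimit, routing::post, Router};

// Placeholder handler: echoes the body length back.
async fn broadcast(body: String) -> String {
    format!("received {} bytes", body.len())
}

// Any request whose body exceeds 512 KiB is rejected with `413 Payload Too
// Large` before the handler runs, so oversized uploads never reach the
// transaction deserializer.
fn app() -> Router {
    Router::new()
        .route("/mainnet/transaction/broadcast", post(broadcast))
        .layer(DefaultBodyLimit::max(512 * 1024))
}
```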
// Deprecated: Use `get_committee_latest` instead. - // GET /testnet3/latest/committee + // GET /<network>/latest/committee pub(crate) async fn latest_committee(State(rest): State<Self>) -> Result<ErasedJson, RestError> { Ok(ErasedJson::pretty(rest.ledger.latest_committee()?)) } // --------------------------------------------------------- - // GET /testnet3/block/height/latest + // GET /<network>/block/height/latest pub(crate) async fn get_block_height_latest(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_height()) } - // GET /testnet3/block/hash/latest + // GET /<network>/block/hash/latest pub(crate) async fn get_block_hash_latest(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_hash()) } - // GET /testnet3/block/latest + // GET /<network>/block/latest pub(crate) async fn get_block_latest(State(rest): State<Self>) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_block()) } - // GET /testnet3/block/{height} - // GET /testnet3/block/{blockHash} + // GET /<network>/block/{height} + // GET /<network>/block/{blockHash} pub(crate) async fn get_block( State(rest): State<Self>, Path(height_or_hash): Path<String>, ) -> Result<ErasedJson, RestError> { - // Manually parse the height or the height or the hash, axum doesn't support different types + // Manually parse the height or the hash, axum doesn't support different types // for the same path param. let block = if let Ok(height) = height_or_hash.parse::<u32>() { rest.ledger.get_block(height)? @@ -112,7 +112,7 @@ impl<N: Network, R: Routing<N>> Rest<N, R> { Ok(ErasedJson::pretty(block)) } - // GET /testnet3/blocks?start={start_height}&end={end_height} + // GET /<network>/blocks?start={start_height}&end={end_height} pub(crate) async fn get_blocks( State(rest): State<Self>, Query(block_range): Query<BlockRange>, ) -> Result<ErasedJson, RestError> { @@ -135,14 +135,21 @@ impl<N: Network, R: Routing<N>> Rest<N, R> { ))); } - let blocks = cfg_into_iter!((start_height..end_height)) - .map(|height| rest.ledger.get_block(height)) - .collect::<Result<Vec<_>, _>>()?; - - Ok(ErasedJson::pretty(blocks)) + // Fetch the blocks from the ledger.
+ match tokio::task::spawn_blocking(move || { + cfg_into_iter!((start_height..end_height)) + .map(|height| rest.ledger.get_block(height)) + .collect::, _>>() + }) + .await + { + Ok(Ok(blocks)) => Ok(ErasedJson::pretty(blocks)), + Ok(Err(err)) => Err(RestError(format!("Failed to get blocks '{start_height}..{end_height}' - {err}"))), + Err(err) => Err(RestError(format!("Failed to get blocks '{start_height}..{end_height}' - {err}"))), + } } - // GET /testnet3/height/{blockHash} + // GET //height/{blockHash} pub(crate) async fn get_height( State(rest): State, Path(hash): Path, @@ -150,7 +157,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.get_height(&hash)?)) } - // GET /testnet3/block/{height}/transactions + // GET //block/{height}/transactions pub(crate) async fn get_block_transactions( State(rest): State, Path(height): Path, @@ -158,7 +165,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.get_transactions(height)?)) } - // GET /testnet3/transaction/{transactionID} + // GET //transaction/{transactionID} pub(crate) async fn get_transaction( State(rest): State, Path(tx_id): Path, @@ -166,7 +173,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.get_transaction(tx_id)?)) } - // GET /testnet3/transaction/confirmed/{transactionID} + // GET //transaction/confirmed/{transactionID} pub(crate) async fn get_confirmed_transaction( State(rest): State, Path(tx_id): Path, @@ -174,7 +181,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.get_confirmed_transaction(tx_id)?)) } - // GET /testnet3/memoryPool/transmissions + // GET //memoryPool/transmissions pub(crate) async fn get_memory_pool_transmissions(State(rest): State) -> Result { match rest.consensus { Some(consensus) => { @@ -184,7 +191,7 @@ impl, R: Routing> Rest { } } - // GET /testnet3/memoryPool/solutions + // GET //memoryPool/solutions pub(crate) async fn get_memory_pool_solutions(State(rest): State) -> Result { match rest.consensus { Some(consensus) => Ok(ErasedJson::pretty(consensus.unconfirmed_solutions().collect::>())), @@ -192,7 +199,7 @@ impl, R: Routing> Rest { } } - // GET /testnet3/memoryPool/transactions + // GET //memoryPool/transactions pub(crate) async fn get_memory_pool_transactions(State(rest): State) -> Result { match rest.consensus { Some(consensus) => Ok(ErasedJson::pretty(consensus.unconfirmed_transactions().collect::>())), @@ -200,7 +207,7 @@ impl, R: Routing> Rest { } } - // GET /testnet3/program/{programID} + // GET //program/{programID} pub(crate) async fn get_program( State(rest): State, Path(id): Path>, @@ -208,7 +215,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.get_program(id)?)) } - // GET /testnet3/program/{programID}/mappings + // GET //program/{programID}/mappings pub(crate) async fn get_mapping_names( State(rest): State, Path(id): Path>, @@ -216,8 +223,8 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.vm().finalize_store().get_mapping_names_confirmed(&id)?)) } - // GET /testnet3/program/{programID}/mapping/{mappingName}/{mappingKey} - // GET /testnet3/program/{programID}/mapping/{mappingName}/{mappingKey}?metadata={true} + // GET //program/{programID}/mapping/{mappingName}/{mappingKey} + // GET //program/{programID}/mapping/{mappingName}/{mappingKey}?metadata={true} pub(crate) async fn get_mapping_value( State(rest): State, Path((id, name, key)): Path<(ProgramID, Identifier, Plaintext)>, @@ -238,7 +245,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(mapping_value)) } - // GET /testnet3/statePath/{commitment} + // GET 
//statePath/{commitment} pub(crate) async fn get_state_path_for_commitment( State(rest): State, Path(commitment): Path>, @@ -246,37 +253,71 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.get_state_path_for_commitment(&commitment)?)) } - // GET /testnet3/stateRoot/latest + // GET //stateRoot/latest pub(crate) async fn get_state_root_latest(State(rest): State) -> ErasedJson { ErasedJson::pretty(rest.ledger.latest_state_root()) } - // GET /testnet3/committee/latest + // GET //stateRoot/{height} + pub(crate) async fn get_state_root( + State(rest): State, + Path(height): Path, + ) -> Result { + Ok(ErasedJson::pretty(rest.ledger.get_state_root(height)?)) + } + + // GET //committee/latest pub(crate) async fn get_committee_latest(State(rest): State) -> Result { Ok(ErasedJson::pretty(rest.ledger.latest_committee()?)) } - // GET /testnet3/peers/count + // GET //committee/{height} + pub(crate) async fn get_committee( + State(rest): State, + Path(height): Path, + ) -> Result { + Ok(ErasedJson::pretty(rest.ledger.get_committee(height)?)) + } + + // GET //delegators/{validator} + pub(crate) async fn get_delegators_for_validator( + State(rest): State, + Path(validator): Path>, + ) -> Result { + // Do not process the request if the node is too far behind to avoid sending outdated data. + if rest.routing.num_blocks_behind() > SYNC_LENIENCY { + return Err(RestError("Unable to request delegators (node is syncing)".to_string())); + } + + // Return the delegators for the given validator. + match tokio::task::spawn_blocking(move || rest.ledger.get_delegators_for_validator(&validator)).await { + Ok(Ok(delegators)) => Ok(ErasedJson::pretty(delegators)), + Ok(Err(err)) => Err(RestError(format!("Unable to request delegators - {err}"))), + Err(err) => Err(RestError(format!("Unable to request delegators - {err}"))), + } + } + + // GET //peers/count pub(crate) async fn get_peers_count(State(rest): State) -> ErasedJson { ErasedJson::pretty(rest.routing.router().number_of_connected_peers()) } - // GET /testnet3/peers/all + // GET //peers/all pub(crate) async fn get_peers_all(State(rest): State) -> ErasedJson { ErasedJson::pretty(rest.routing.router().connected_peers()) } - // GET /testnet3/peers/all/metrics + // GET //peers/all/metrics pub(crate) async fn get_peers_all_metrics(State(rest): State) -> ErasedJson { ErasedJson::pretty(rest.routing.router().connected_metrics()) } - // GET /testnet3/node/address + // GET //node/address pub(crate) async fn get_node_address(State(rest): State) -> ErasedJson { ErasedJson::pretty(rest.routing.router().address()) } - // GET /testnet3/find/blockHash/{transactionID} + // GET //find/blockHash/{transactionID} pub(crate) async fn find_block_hash( State(rest): State, Path(tx_id): Path, @@ -284,7 +325,15 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.find_block_hash(&tx_id)?)) } - // GET /testnet3/find/transactionID/deployment/{programID} + // GET //find/blockHeight/{stateRoot} + pub(crate) async fn find_block_height_from_state_root( + State(rest): State, + Path(state_root): Path, + ) -> Result { + Ok(ErasedJson::pretty(rest.ledger.find_block_height_from_state_root(state_root)?)) + } + + // GET //find/transactionID/deployment/{programID} pub(crate) async fn find_transaction_id_from_program_id( State(rest): State, Path(program_id): Path>, @@ -292,7 +341,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.find_transaction_id_from_program_id(&program_id)?)) } - // GET /testnet3/find/transactionID/{transitionID} + // GET 
//find/transactionID/{transitionID} pub(crate) async fn find_transaction_id_from_transition_id( State(rest): State, Path(transition_id): Path, @@ -300,7 +349,7 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.find_transaction_id_from_transition_id(&transition_id)?)) } - // GET /testnet3/find/transitionID/{inputOrOutputID} + // GET //find/transitionID/{inputOrOutputID} pub(crate) async fn find_transition_id( State(rest): State, Path(input_or_output_id): Path>, @@ -308,11 +357,25 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(rest.ledger.find_transition_id(&input_or_output_id)?)) } - // POST /testnet3/transaction/broadcast + // POST //transaction/broadcast pub(crate) async fn transaction_broadcast( State(rest): State, Json(tx): Json>, ) -> Result { + // Do not process the transaction if the node is too far behind. + if rest.routing.num_blocks_behind() > SYNC_LENIENCY { + return Err(RestError(format!("Unable to broadcast transaction '{}' (node is syncing)", fmt_id(tx.id())))); + } + + // If the transaction exceeds the transaction size limit, return an error. + // The buffer is initially roughly sized to hold a `transfer_public`, + // most transactions will be smaller and this reduces unnecessary allocations. + // TODO: Should this be a blocking task? + let buffer = Vec::with_capacity(3000); + if tx.write_le(LimitedWriter::new(buffer, N::MAX_TRANSACTION_SIZE)).is_err() { + return Err(RestError("Transaction size exceeds the byte limit".to_string())); + } + // If the consensus module is enabled, add the unconfirmed transaction to the memory pool. if let Some(consensus) = rest.consensus { // Add the unconfirmed transaction to the memory pool. @@ -332,27 +395,53 @@ impl, R: Routing> Rest { Ok(ErasedJson::pretty(tx_id)) } - // POST /testnet3/solution/broadcast + // POST //solution/broadcast pub(crate) async fn solution_broadcast( State(rest): State, - Json(prover_solution): Json>, + Json(solution): Json>, ) -> Result { + // Do not process the solution if the node is too far behind. + if rest.routing.num_blocks_behind() > SYNC_LENIENCY { + return Err(RestError(format!( + "Unable to broadcast solution '{}' (node is syncing)", + fmt_id(solution.id()) + ))); + } + // If the consensus module is enabled, add the unconfirmed solution to the memory pool. - if let Some(consensus) = rest.consensus { + // Otherwise, verify it prior to broadcasting. + match rest.consensus { // Add the unconfirmed solution to the memory pool. - consensus.add_unconfirmed_solution(prover_solution).await?; + Some(consensus) => consensus.add_unconfirmed_solution(solution).await?, + // Verify the solution. + None => { + // Compute the current epoch hash. + let epoch_hash = rest.ledger.latest_epoch_hash()?; + // Retrieve the current proof target. + let proof_target = rest.ledger.latest_proof_target(); + // Ensure that the solution is valid for the given epoch. + let puzzle = rest.ledger.puzzle().clone(); + // Verify the solution in a blocking task. + match tokio::task::spawn_blocking(move || puzzle.check_solution(&solution, epoch_hash, proof_target)) + .await + { + Ok(Ok(())) => {} + Ok(Err(err)) => { + return Err(RestError(format!("Invalid solution '{}' - {err}", fmt_id(solution.id())))); + } + Err(err) => return Err(RestError(format!("Invalid solution '{}' - {err}", fmt_id(solution.id())))), + } + } } - let commitment = prover_solution.commitment(); + let solution_id = solution.id(); // Prepare the unconfirmed solution message. 
- let message = Message::UnconfirmedSolution(UnconfirmedSolution { - solution_id: commitment, - solution: Data::Object(prover_solution), - }); + let message = + Message::UnconfirmedSolution(UnconfirmedSolution { solution_id, solution: Data::Object(solution) }); // Broadcast the unconfirmed solution message. rest.routing.propagate(message, &[]); - Ok(ErasedJson::pretty(commitment)) + Ok(ErasedJson::pretty(solution_id)) } } diff --git a/node/router/messages/Cargo.toml b/node/router/messages/Cargo.toml index 503ef8c1f6..22999d4294 100644 --- a/node/router/messages/Cargo.toml +++ b/node/router/messages/Cargo.toml @@ -47,9 +47,6 @@ version = "=2.2.7" [dependencies.snarkvm] workspace = true -[dependencies.snow] -version = "0.9.6" - [dependencies.tokio] version = "1.28" features = [ diff --git a/node/router/messages/src/block_response.rs b/node/router/messages/src/block_response.rs index d7da6b93cf..75a4135dd6 100644 --- a/node/router/messages/src/block_response.rs +++ b/node/router/messages/src/block_response.rs @@ -74,7 +74,7 @@ pub mod prop_tests { }; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_block() -> BoxedStrategy> { any::().prop_map(|seed| sample_genesis_block(&mut TestRng::fixed(seed))).boxed() diff --git a/node/router/messages/src/challenge_request.rs b/node/router/messages/src/challenge_request.rs index b1bafedb8e..cdcc3638ff 100644 --- a/node/router/messages/src/challenge_request.rs +++ b/node/router/messages/src/challenge_request.rs @@ -76,7 +76,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_valid_address() -> BoxedStrategy> { any::().prop_map(|seed| Address::rand(&mut TestRng::fixed(seed))).boxed() diff --git a/node/router/messages/src/challenge_response.rs b/node/router/messages/src/challenge_response.rs index 3c75d4db19..190001837e 100644 --- a/node/router/messages/src/challenge_response.rs +++ b/node/router/messages/src/challenge_response.rs @@ -25,6 +25,7 @@ use std::borrow::Cow; pub struct ChallengeResponse { pub genesis_header: Header, pub signature: Data>, + pub nonce: u64, } impl MessageTrait for ChallengeResponse { @@ -38,13 +39,18 @@ impl MessageTrait for ChallengeResponse { impl ToBytes for ChallengeResponse { fn write_le(&self, mut writer: W) -> io::Result<()> { self.genesis_header.write_le(&mut writer)?; - self.signature.write_le(&mut writer) + self.signature.write_le(&mut writer)?; + self.nonce.write_le(&mut writer) } } impl FromBytes for ChallengeResponse { fn read_le(mut reader: R) -> io::Result { - Ok(Self { genesis_header: Header::read_le(&mut reader)?, signature: Data::read_le(reader)? 
}) + Ok(Self { + genesis_header: Header::read_le(&mut reader)?, + signature: Data::read_le(&mut reader)?, + nonce: u64::read_le(reader)?, + }) } } @@ -62,7 +68,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_signature() -> BoxedStrategy<Signature<CurrentNetwork>> { (0..64) @@ -80,8 +86,12 @@ } pub fn any_challenge_response() -> BoxedStrategy<ChallengeResponse<CurrentNetwork>> { - (any_signature(), any_genesis_header()) - .prop_map(|(sig, genesis_header)| ChallengeResponse { signature: Data::Object(sig), genesis_header }) + (any_signature(), any_genesis_header(), any::<u64>()) + .prop_map(|(sig, genesis_header, nonce)| ChallengeResponse { + signature: Data::Object(sig), + genesis_header, + nonce, + }) .boxed() } diff --git a/node/router/messages/src/helpers/codec.rs index a3bba2abf8..c9ecb0f227 100644 --- a/node/router/messages/src/helpers/codec.rs +++ b/node/router/messages/src/helpers/codec.rs @@ -75,14 +75,49 @@ impl<N: Network> Decoder for MessageCodec<N> { None => return Ok(None), }; + Self::Item::check_size(&bytes)?; + // Convert the bytes to a message, or fail if it is not valid. let reader = bytes.reader(); match Message::read_le(reader) { Ok(message) => Ok(Some(message)), Err(error) => { - error!("Failed to deserialize a message: {}", error); + warn!("Failed to deserialize a message - {}", error); Err(std::io::ErrorKind::InvalidData.into()) } } } } + +#[cfg(test)] +mod tests { + use super::*; + + use crate::{ + unconfirmed_transaction::prop_tests::{any_large_unconfirmed_transaction, any_unconfirmed_transaction}, + UnconfirmedTransaction, + }; + + use proptest::prelude::ProptestConfig; + use test_strategy::proptest; + + type CurrentNetwork = snarkvm::prelude::MainnetV0; + + #[proptest] + fn unconfirmed_transaction(#[strategy(any_unconfirmed_transaction())] tx: UnconfirmedTransaction<CurrentNetwork>) { + let mut bytes = BytesMut::new(); + let mut codec = MessageCodec::<CurrentNetwork>::default(); + assert!(codec.encode(Message::UnconfirmedTransaction(tx), &mut bytes).is_ok()); + assert!(codec.decode(&mut bytes).is_ok()); + } + + #[proptest(ProptestConfig { cases: 10, ..ProptestConfig::default() })] + fn overly_large_unconfirmed_transaction( + #[strategy(any_large_unconfirmed_transaction())] tx: UnconfirmedTransaction<CurrentNetwork>, + ) { + let mut bytes = BytesMut::new(); + let mut codec = MessageCodec::<CurrentNetwork>::default(); + assert!(codec.encode(Message::UnconfirmedTransaction(tx), &mut bytes).is_ok()); + assert!(matches!(codec.decode(&mut bytes), Err(err) if err.kind() == std::io::ErrorKind::InvalidData)); + } +} diff --git a/node/router/messages/src/lib.rs index baa512b5b4..66a3ddd5ce 100644 --- a/node/router/messages/src/lib.rs +++ b/node/router/messages/src/lib.rs @@ -64,8 +64,8 @@ pub use snarkos_node_bft_events::DataBlocks; use snarkos_node_sync_locators::BlockLocators; use snarkvm::prelude::{ block::{Header, Transaction}, - coinbase::{EpochChallenge, ProverSolution, PuzzleCommitment}, error, + puzzle::{Solution, SolutionID}, Address, FromBytes, Network, @@ -111,7 +111,7 @@ impl From for Message { impl<N: Network> Message<N> { /// The version of the network protocol; it can be incremented in order to force users to update. - pub const VERSION: u32 = 13; + pub const VERSION: u32 = 15; /// Returns the message name. #[inline] @@ -152,6 +152,28 @@ Self::UnconfirmedTransaction(..) => 12, } } +
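The next hunk adds the pre-decode guard exercised by the codec tests above. Since `Message::id` pins `UnconfirmedTransaction` to ID 12, the guard can reject an oversized transaction frame from its first two bytes alone. A standalone sketch of that check (illustrative; `precheck` is a stand-in for the real `check_size`):

```rust
use std::io;

// Illustrative only: the frame starts with a 2-byte little-endian message ID;
// if it names an `UnconfirmedTransaction` (ID 12), the whole frame must fit
// within the network's transaction size limit.
fn precheck(frame: &[u8], max_transaction_size: usize) -> io::Result<()> {
    if frame.len() < 2 {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid message"));
    }
    let id = u16::from_le_bytes([frame[0], frame[1]]);
    if id == 12 && frame.len() > max_transaction_size {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "transaction is too large"));
    }
    Ok(())
}
```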
+ /// Checks the message byte length. To be used before deserialization. + pub fn check_size(bytes: &[u8]) -> io::Result<()> { + // Store the length to be checked against the max message size for each variant. + let len = bytes.len(); + if len < 2 { + return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid message")); + } + + // Check the first two bytes for the message ID. + let id_bytes: [u8; 2] = (&bytes[..2]) + .try_into() + .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "id couldn't be deserialized"))?; + let id = u16::from_le_bytes(id_bytes); + + // SPECIAL CASE: check the transaction message isn't too large. + if id == 12 && len > N::MAX_TRANSACTION_SIZE { + return Err(io::Error::new(io::ErrorKind::InvalidData, "transaction is too large"))?; + } + + Ok(()) + } } impl<N: Network> ToBytes for Message<N> { diff --git a/node/router/messages/src/ping.rs index bf439ec781..05e4f9f719 100644 --- a/node/router/messages/src/ping.rs +++ b/node/router/messages/src/ping.rs @@ -16,7 +16,6 @@ use super::*; use snarkvm::prelude::{FromBytes, ToBytes}; -use indexmap::IndexMap; use std::borrow::Cow; #[derive(Clone, Debug, PartialEq, Eq)] @@ -40,18 +39,7 @@ impl<N: Network> ToBytes for Ping<N> { self.node_type.write_le(&mut writer)?; if let Some(locators) = &self.block_locators { 1u8.write_le(&mut writer)?; - - (locators.recents.len().min(u32::MAX as usize) as u32).write_le(&mut writer)?; - for (height, hash) in locators.recents.iter() { - height.write_le(&mut writer)?; - hash.write_le(&mut writer)?; - } - - (locators.checkpoints.len().min(u32::MAX as usize) as u32).write_le(&mut writer)?; - for (height, hash) in locators.checkpoints.iter() { - height.write_le(&mut writer)?; - hash.write_le(&mut writer)?; - } + locators.write_le(&mut writer)?; } else { 0u8.write_le(&mut writer)?; } @@ -66,32 +54,13 @@ impl<N: Network> FromBytes for Ping<N> { let node_type = NodeType::read_le(&mut reader)?; let selector = u8::read_le(&mut reader)?; + let block_locators = match selector { + 0 => None, + 1 => Some(BlockLocators::read_le(&mut reader)?), + _ => return Err(error("Invalid block locators marker")), + }; - if selector == 0 { - Ok(Self { version, node_type, block_locators: None }) - } else if selector == 1 { - let mut recents = IndexMap::new(); - let num_recents = u32::read_le(&mut reader)?; - for _ in 0..num_recents { - let height = u32::read_le(&mut reader)?; - let hash = N::BlockHash::read_le(&mut reader)?; - recents.insert(height, hash); - } - - let mut checkpoints = IndexMap::new(); - let num_checkpoints = u32::read_le(&mut reader)?; - for _ in 0..num_checkpoints { - let height = u32::read_le(&mut reader)?; - let hash = N::BlockHash::read_le(&mut reader)?; - checkpoints.insert(height, hash); - } - - let block_locators = Some(BlockLocators { recents, checkpoints }); - - Ok(Self { version, node_type, block_locators }) - } else { - Err(error("Invalid selector of optional block locators in ping message")) - } + Ok(Self { version, node_type, block_locators }) } } @@ -111,7 +80,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_block_locators() -> BoxedStrategy<BlockLocators<CurrentNetwork>> { any::().prop_map(sample_block_locators).boxed() diff --git a/node/router/messages/src/puzzle_response.rs index 1523d17cb4..ecafccac1d 100644 --- a/node/router/messages/src/puzzle_response.rs +++ b/node/router/messages/src/puzzle_response.rs @@ -23,7 +23,7 @@ use
std::borrow::Cow; #[derive(Clone, Debug, PartialEq, Eq)] pub struct PuzzleResponse { - pub epoch_challenge: EpochChallenge, + pub epoch_hash: N::BlockHash, pub block_header: Data>, } @@ -37,45 +37,44 @@ impl MessageTrait for PuzzleResponse { impl ToBytes for PuzzleResponse { fn write_le(&self, mut writer: W) -> io::Result<()> { - self.epoch_challenge.write_le(&mut writer)?; + self.epoch_hash.write_le(&mut writer)?; self.block_header.write_le(&mut writer) } } impl FromBytes for PuzzleResponse { fn read_le(mut reader: R) -> io::Result { - Ok(Self { epoch_challenge: EpochChallenge::read_le(&mut reader)?, block_header: Data::read_le(reader)? }) + Ok(Self { epoch_hash: N::BlockHash::read_le(&mut reader)?, block_header: Data::read_le(reader)? }) } } #[cfg(test)] pub mod prop_tests { - use crate::{challenge_response::prop_tests::any_genesis_header, EpochChallenge, PuzzleResponse}; + use crate::{challenge_response::prop_tests::any_genesis_header, PuzzleResponse}; use snarkvm::{ console::prelude::{FromBytes, ToBytes}, ledger::narwhal::Data, - prelude::{Rng, TestRng}, + prelude::{Network, Rng, TestRng}, }; use bytes::{Buf, BufMut, BytesMut}; use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; - pub fn any_epoch_challenge() -> BoxedStrategy> { + pub fn any_epoch_hash() -> BoxedStrategy<::BlockHash> { any::() .prop_map(|seed| { let mut rng = TestRng::fixed(seed); - let degree: u16 = rng.gen_range(1..=u16::MAX); - EpochChallenge::::new(rng.gen(), rng.gen(), degree as u32).unwrap() + rng.gen() }) .boxed() } pub fn any_puzzle_response() -> BoxedStrategy> { - (any_epoch_challenge(), any_genesis_header()) - .prop_map(|(epoch_challenge, bh)| PuzzleResponse { epoch_challenge, block_header: Data::Object(bh) }) + (any_epoch_hash(), any_genesis_header()) + .prop_map(|(epoch_hash, bh)| PuzzleResponse { epoch_hash, block_header: Data::Object(bh) }) .boxed() } @@ -85,7 +84,7 @@ pub mod prop_tests { PuzzleResponse::write_le(&original, &mut buf).unwrap(); let deserialized: PuzzleResponse = PuzzleResponse::read_le(buf.into_inner().reader()).unwrap(); - assert_eq!(original.epoch_challenge, deserialized.epoch_challenge); + assert_eq!(original.epoch_hash, deserialized.epoch_hash); assert_eq!( original.block_header.deserialize_blocking().unwrap(), deserialized.block_header.deserialize_blocking().unwrap(), diff --git a/node/router/messages/src/unconfirmed_solution.rs b/node/router/messages/src/unconfirmed_solution.rs index eb8554a76f..9caf066ab9 100644 --- a/node/router/messages/src/unconfirmed_solution.rs +++ b/node/router/messages/src/unconfirmed_solution.rs @@ -23,8 +23,8 @@ use std::borrow::Cow; #[derive(Clone, Debug, PartialEq, Eq)] pub struct UnconfirmedSolution { - pub solution_id: PuzzleCommitment, - pub solution: Data>, + pub solution_id: SolutionID, + pub solution: Data>, } impl MessageTrait for UnconfirmedSolution { @@ -44,16 +44,15 @@ impl ToBytes for UnconfirmedSolution { impl FromBytes for UnconfirmedSolution { fn read_le(mut reader: R) -> io::Result { - Ok(Self { solution_id: PuzzleCommitment::read_le(&mut reader)?, solution: Data::read_le(reader)? }) + Ok(Self { solution_id: SolutionID::read_le(&mut reader)?, solution: Data::read_le(reader)? 
}) } } #[cfg(test)] pub mod prop_tests { - use crate::{ProverSolution, PuzzleCommitment, UnconfirmedSolution}; + use crate::{Solution, SolutionID, UnconfirmedSolution}; use snarkvm::{ - algorithms::polycommit::kzg10::{KZGCommitment, KZGProof}, - ledger::{coinbase::PartialSolution, narwhal::Data}, + ledger::narwhal::Data, prelude::{Address, FromBytes, PrivateKey, Rng, TestRng, ToBytes}, }; @@ -61,28 +60,25 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; - pub fn any_solution_id() -> BoxedStrategy> { - any::() - .prop_map(|seed| PuzzleCommitment::::new(KZGCommitment(TestRng::fixed(seed).gen()))) - .boxed() + pub fn any_solution_id() -> BoxedStrategy> { + any::().prop_map(|seed| TestRng::fixed(seed).gen::().into()).boxed() } - pub fn any_prover_solution() -> BoxedStrategy> { + pub fn any_solution() -> BoxedStrategy> { any::() .prop_map(|seed| { let mut rng = TestRng::fixed(seed); let private_key = PrivateKey::::new(&mut rng).unwrap(); let address = Address::try_from(private_key).unwrap(); - let partial_solution = PartialSolution::new(address, rng.gen(), KZGCommitment(rng.gen())); - ProverSolution::new(partial_solution, KZGProof { w: rng.gen(), random_v: None }) + Solution::new(rng.gen(), address, rng.gen()).unwrap() }) .boxed() } pub fn any_unconfirmed_solution() -> BoxedStrategy> { - (any_solution_id(), any_prover_solution()) + (any_solution_id(), any_solution()) .prop_map(|(solution_id, ps)| UnconfirmedSolution { solution_id, solution: Data::Object(ps) }) .boxed() } diff --git a/node/router/messages/src/unconfirmed_transaction.rs b/node/router/messages/src/unconfirmed_transaction.rs index d6565eaeb4..c64f0de490 100644 --- a/node/router/messages/src/unconfirmed_transaction.rs +++ b/node/router/messages/src/unconfirmed_transaction.rs @@ -60,7 +60,10 @@ impl FromBytes for UnconfirmedTransaction { pub mod prop_tests { use crate::{Transaction, UnconfirmedTransaction}; use snarkvm::{ - ledger::{ledger_test_helpers::sample_fee_public_transaction, narwhal::Data}, + ledger::{ + ledger_test_helpers::{sample_fee_public_transaction, sample_large_execution_transaction}, + narwhal::Data, + }, prelude::{FromBytes, TestRng, ToBytes}, }; @@ -68,7 +71,7 @@ pub mod prop_tests { use proptest::prelude::{any, BoxedStrategy, Strategy}; use test_strategy::proptest; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; pub fn any_transaction() -> BoxedStrategy> { any::() @@ -79,12 +82,27 @@ pub mod prop_tests { .boxed() } + pub fn any_large_transaction() -> BoxedStrategy> { + any::() + .prop_map(|seed| { + let mut rng = TestRng::fixed(seed); + sample_large_execution_transaction(&mut rng) + }) + .boxed() + } + pub fn any_unconfirmed_transaction() -> BoxedStrategy> { any_transaction() .prop_map(|tx| UnconfirmedTransaction { transaction_id: tx.id(), transaction: Data::Object(tx) }) .boxed() } + pub fn any_large_unconfirmed_transaction() -> BoxedStrategy> { + any_large_transaction() + .prop_map(|tx| UnconfirmedTransaction { transaction_id: tx.id(), transaction: Data::Object(tx) }) + .boxed() + } + #[proptest] fn unconfirmed_transaction_roundtrip( #[strategy(any_unconfirmed_transaction())] original: UnconfirmedTransaction, diff --git a/node/router/src/handshake.rs b/node/router/src/handshake.rs index 7ccb67fa89..57198692d4 100644 --- a/node/router/src/handshake.rs +++ b/node/router/src/handshake.rs @@ 
-164,12 +164,15 @@ impl<N: Network> Router<N> { } /* Step 3: Send the challenge response. */ + let response_nonce: u64 = rng.gen(); + let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat(); // Sign the counterparty nonce. - let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else { + let Ok(our_signature) = self.account.sign_bytes(&data, rng) else { return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'"))); }; // Send the challenge response. - let our_response = ChallengeResponse { genesis_header, signature: Data::Object(our_signature) }; + let our_response = + ChallengeResponse { genesis_header, signature: Data::Object(our_signature), nonce: response_nonce }; send(&mut framed, peer_addr, Message::ChallengeResponse(our_response)).await?; // Add the peer to the router. @@ -213,11 +216,14 @@ impl<N: Network> Router<N> { let rng = &mut OsRng; // Sign the counterparty nonce. - let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else { + let response_nonce: u64 = rng.gen(); + let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat(); + let Ok(our_signature) = self.account.sign_bytes(&data, rng) else { return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'"))); }; // Send the challenge response. - let our_response = ChallengeResponse { genesis_header, signature: Data::Object(our_signature) }; + let our_response = + ChallengeResponse { genesis_header, signature: Data::Object(our_signature), nonce: response_nonce }; send(&mut framed, peer_addr, Message::ChallengeResponse(our_response)).await?; // Sample a random nonce. @@ -258,6 +264,10 @@ impl<N: Network> Router<N> { if self.is_connected(&peer_ip) { bail!("Dropping connection request from '{peer_ip}' (already connected)") } + // Only allow trusted peers to connect when allow_external_peers is disabled. + if !self.allow_external_peers() && !self.is_trusted(&peer_ip) { + bail!("Dropping connection request from '{peer_ip}' (untrusted)") + } // Ensure the peer is not restricted. if self.is_restricted(&peer_ip) { bail!("Dropping connection request from '{peer_ip}' (restricted)") } @@ -303,7 +313,7 @@ impl<N: Network> Router<N> { expected_nonce: u64, ) -> Option<DisconnectReason> { // Retrieve the components of the challenge response. - let ChallengeResponse { genesis_header, signature } = response; + let ChallengeResponse { genesis_header, signature, nonce } = response; // Verify the challenge response, by checking that the block header matches. if genesis_header != expected_genesis_header { @@ -316,7 +326,7 @@ return Some(DisconnectReason::InvalidChallengeResponse); }; // Verify the signature. - if !signature.verify_bytes(&peer_address, &expected_nonce.to_le_bytes()) { + if !signature.verify_bytes(&peer_address, &[expected_nonce.to_le_bytes(), nonce.to_le_bytes()].concat()) { warn!("Handshake with '{peer_addr}' failed (invalid signature)"); return Some(DisconnectReason::InvalidChallengeResponse); }
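A note on the handshake change above: both sides now sign the peer's nonce concatenated with a fresh `response_nonce`, and `verify_challenge_response` rebuilds the identical bytes from `expected_nonce` plus the `nonce` field in the message. A minimal sketch of the signed byte layout (illustrative only):

```rust
// Illustrative only: the bytes both sides sign and verify. Binding the
// responder's own fresh nonce into the signed message means a captured
// `ChallengeResponse` cannot be replayed against a different challenge.
fn challenge_message(peer_nonce: u64, response_nonce: u64) -> Vec<u8> {
    [peer_nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat()
}

fn main() {
    let signed = challenge_message(42, 7);
    assert_eq!(signed.len(), 16); // two little-endian u64 values
    // The verifier recomputes the same 16 bytes from its `expected_nonce`
    // and the `nonce` carried in the `ChallengeResponse`.
    assert_eq!(signed, challenge_message(42, 7));
}
```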
+ const MAXIMUM_NUMBER_OF_PROVERS: usize = Self::MAXIMUM_NUMBER_OF_PEERS / 4; /// Handles the heartbeat request. fn heartbeat(&self) { @@ -68,6 +70,7 @@ pub trait Heartbeat: Outbound { assert!(Self::MINIMUM_NUMBER_OF_PEERS <= Self::MAXIMUM_NUMBER_OF_PEERS); assert!(Self::MINIMUM_NUMBER_OF_PEERS <= Self::MEDIAN_NUMBER_OF_PEERS); assert!(Self::MEDIAN_NUMBER_OF_PEERS <= Self::MAXIMUM_NUMBER_OF_PEERS); + assert!(Self::MAXIMUM_NUMBER_OF_PROVERS <= Self::MAXIMUM_NUMBER_OF_PEERS); } /// This function logs the connected peers. @@ -104,6 +107,11 @@ pub trait Heartbeat: Outbound { return; } + // Skip if the node is not requesting peers. + if !self.router().allow_external_peers() { + return; + } + // Retrieve the trusted peers. let trusted = self.router().trusted_peers(); // Retrieve the bootstrap peers. @@ -115,6 +123,8 @@ pub trait Heartbeat: Outbound { .get_connected_peers() .iter() .filter(|peer| !trusted.contains(&peer.ip()) && !bootstrap.contains(&peer.ip())) + .filter(|peer| !self.router().cache.contains_inbound_block_request(&peer.ip())) // Skip if the peer is syncing. + .filter(|peer| self.is_block_synced() || self.router().cache.num_outbound_block_requests(&peer.ip()) == 0) // Skip if you are syncing from this peer. .min_by_key(|peer| peer.last_seen()) .map(|peer| peer.ip()); @@ -132,13 +142,22 @@ pub trait Heartbeat: Outbound { fn handle_connected_peers(&self) { // Obtain the number of connected peers. let num_connected = self.router().number_of_connected_peers(); - // Compute the number of surplus peers. - let num_surplus = num_connected.saturating_sub(Self::MAXIMUM_NUMBER_OF_PEERS); - // Compute the number of deficit peers. - let num_deficient = Self::MEDIAN_NUMBER_OF_PEERS.saturating_sub(num_connected); + // Compute the total number of surplus peers. + let num_surplus_peers = num_connected.saturating_sub(Self::MAXIMUM_NUMBER_OF_PEERS); - if num_surplus > 0 { - debug!("Exceeded maximum number of connected peers, disconnecting from {num_surplus} peers"); + // Obtain the number of connected provers. + let num_connected_provers = self.router().number_of_connected_provers(); + // Compute the number of surplus provers. + let num_surplus_provers = num_connected_provers.saturating_sub(Self::MAXIMUM_NUMBER_OF_PROVERS); + // Compute the number of provers remaining connected. + let num_remaining_provers = num_connected_provers.saturating_sub(num_surplus_provers); + // Compute the number of surplus clients and validators. + let num_surplus_clients_validators = num_surplus_peers.saturating_sub(num_remaining_provers); + + if num_surplus_provers > 0 || num_surplus_clients_validators > 0 { + debug!( + "Exceeded maximum number of connected peers, disconnecting from ({num_surplus_provers} + {num_surplus_clients_validators}) peers" + ); // Retrieve the trusted peers. let trusted = self.router().trusted_peers(); @@ -148,18 +167,33 @@ pub trait Heartbeat: Outbound { // Initialize an RNG. let rng = &mut OsRng; - // TODO (howardwu): As a validator, prioritize disconnecting from clients and provers. + // Determine the provers to disconnect from. + let prover_ips_to_disconnect = self + .router() + .connected_provers() + .into_iter() + .filter(|peer_ip| !trusted.contains(peer_ip) && !bootstrap.contains(peer_ip)) + .choose_multiple(rng, num_surplus_provers); + + // TODO (howardwu): As a validator, prioritize disconnecting from clients. // Remove RNG, pick the `n` oldest nodes. - // Determine the peers to disconnect from. + // Determine the clients and validators to disconnect from. 
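For intuition, the surplus accounting above can be condensed into a standalone function; the constants mirror the trait defaults, the worked numbers are illustrative only, and the actual selection of peers to disconnect continues just below:

const MAXIMUM_NUMBER_OF_PEERS: usize = 21;
const MAXIMUM_NUMBER_OF_PROVERS: usize = MAXIMUM_NUMBER_OF_PEERS / 4; // 5

// Mirrors the surplus arithmetic above: provers beyond their cap are shed
// first, then the remaining surplus is charged to clients and validators.
fn surplus_counts(num_connected: usize, num_connected_provers: usize) -> (usize, usize) {
    let num_surplus_peers = num_connected.saturating_sub(MAXIMUM_NUMBER_OF_PEERS);
    let num_surplus_provers = num_connected_provers.saturating_sub(MAXIMUM_NUMBER_OF_PROVERS);
    let num_remaining_provers = num_connected_provers.saturating_sub(num_surplus_provers);
    let num_surplus_clients_validators = num_surplus_peers.saturating_sub(num_remaining_provers);
    (num_surplus_provers, num_surplus_clients_validators)
}

fn main() {
    // 30 connected peers, 9 of them provers: shed 4 provers and 4 others.
    assert_eq!(surplus_counts(30, 9), (4, 4));
}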
let peer_ips_to_disconnect = self .router() - .connected_peers() + .get_connected_peers() .into_iter() - .filter(|peer_ip| !trusted.contains(peer_ip) && !bootstrap.contains(peer_ip)) - .choose_multiple(rng, num_surplus); + .filter_map(|peer| { + let peer_ip = peer.ip(); + if !peer.is_prover() && !trusted.contains(&peer_ip) && !bootstrap.contains(&peer_ip) { + Some(peer_ip) + } else { + None + } + }) + .choose_multiple(rng, num_surplus_clients_validators); // Proceed to send disconnect requests to these peers. - for peer_ip in peer_ips_to_disconnect { + for peer_ip in peer_ips_to_disconnect.into_iter().chain(prover_ips_to_disconnect) { // TODO (howardwu): Remove this after specializing this function. if self.router().node_type().is_prover() { if let Some(peer) = self.router().get_connected_peer(&peer_ip) { @@ -176,6 +210,11 @@ } } + // Obtain the number of connected peers. + let num_connected = self.router().number_of_connected_peers(); + // Compute the number of deficit peers. + let num_deficient = Self::MEDIAN_NUMBER_OF_PEERS.saturating_sub(num_connected); + if num_deficient > 0 { // Initialize an RNG. let rng = &mut OsRng; @@ -184,9 +223,11 @@ for peer_ip in self.router().candidate_peers().into_iter().choose_multiple(rng, num_deficient) { self.router().connect(peer_ip); } - // Request more peers from the connected peers. - for peer_ip in self.router().connected_peers().into_iter().choose_multiple(rng, 3) { - self.send(peer_ip, Message::PeerRequest(PeerRequest)); + if self.router().allow_external_peers() { + // Request more peers from the connected peers. + for peer_ip in self.router().connected_peers().into_iter().choose_multiple(rng, 3) { + self.send(peer_ip, Message::PeerRequest(PeerRequest)); + } } } } @@ -238,7 +279,7 @@ pub trait Heartbeat: Outbound { } } - /// This function updates the coinbase puzzle if network has updated. + /// This function updates the puzzle if the network has updated. fn handle_puzzle_request(&self) { // No-op } diff --git a/node/router/src/helpers/cache.rs b/node/router/src/helpers/cache.rs index 5233cfb5a3..db803a16c0 100644 --- a/node/router/src/helpers/cache.rs +++ b/node/router/src/helpers/cache.rs @@ -13,7 +13,7 @@ // limitations under the License. use crate::messages::BlockRequest; -use snarkvm::prelude::{coinbase::PuzzleCommitment, Network}; +use snarkvm::prelude::{puzzle::SolutionID, Network}; use core::hash::Hash; use linked_hash_map::LinkedHashMap; @@ -27,8 +27,8 @@ use time::{Duration, OffsetDateTime}; /// The maximum number of items to store in a cache map. const MAX_CACHE_SIZE: usize = 1 << 17; -/// A helper containing the peer IP and solution commitment. -type SolutionKey = (SocketAddr, PuzzleCommitment); +/// A helper containing the peer IP and solution ID. +type SolutionKey = (SocketAddr, SolutionID); /// A helper containing the peer IP and transaction ID. type TransactionKey = (SocketAddr, ::TransactionID); @@ -40,7 +40,9 @@ pub struct Cache { seen_inbound_connections: RwLock>>, /// The map of peer IPs to their recent timestamps. seen_inbound_messages: RwLock>>, /// The map of peer IPs to their recent timestamps. seen_inbound_puzzle_requests: RwLock>>, - /// The map of solution commitments to their last seen timestamp. + /// The map of peer IPs to their recent timestamps. + seen_inbound_block_requests: RwLock>>, + /// The map of solution IDs to their last seen timestamp. seen_inbound_solutions: RwLock, OffsetDateTime>>, /// The map of transaction IDs to their last seen timestamp.
seen_inbound_transactions: RwLock, OffsetDateTime>>, @@ -48,7 +50,7 @@ pub struct Cache { seen_outbound_block_requests: RwLock>>, /// The map of peer IPs to the number of puzzle requests. seen_outbound_puzzle_requests: RwLock>, - /// The map of solution commitments to their last seen timestamp. + /// The map of solution IDs to their last seen timestamp. seen_outbound_solutions: RwLock, OffsetDateTime>>, /// The map of transaction IDs to their last seen timestamp. seen_outbound_transactions: RwLock, OffsetDateTime>>, @@ -64,12 +66,16 @@ impl Default for Cache { } impl Cache { + const INBOUND_BLOCK_REQUEST_INTERVAL: i64 = 60; + const INBOUND_PUZZLE_REQUEST_INTERVAL: i64 = 60; + /// Initializes a new instance of the cache. pub fn new() -> Self { Self { seen_inbound_connections: Default::default(), seen_inbound_messages: Default::default(), seen_inbound_puzzle_requests: Default::default(), + seen_inbound_block_requests: Default::default(), seen_inbound_solutions: RwLock::new(LinkedHashMap::with_capacity(MAX_CACHE_SIZE)), seen_inbound_transactions: RwLock::new(LinkedHashMap::with_capacity(MAX_CACHE_SIZE)), seen_outbound_block_requests: Default::default(), @@ -94,16 +100,17 @@ impl Cache { /// Inserts a new timestamp for the given peer IP, returning the number of recent requests. pub fn insert_inbound_puzzle_request(&self, peer_ip: SocketAddr) -> usize { - Self::retain_and_insert(&self.seen_inbound_puzzle_requests, peer_ip, 60) + Self::retain_and_insert(&self.seen_inbound_puzzle_requests, peer_ip, Self::INBOUND_PUZZLE_REQUEST_INTERVAL) } - /// Inserts a solution commitment into the cache, returning the previously seen timestamp if it existed. - pub fn insert_inbound_solution( - &self, - peer_ip: SocketAddr, - solution: PuzzleCommitment, - ) -> Option { - Self::refresh_and_insert(&self.seen_inbound_solutions, (peer_ip, solution)) + /// Inserts a new timestamp for the given peer IP, returning the number of recent block requests. + pub fn insert_inbound_block_request(&self, peer_ip: SocketAddr) -> usize { + Self::retain_and_insert(&self.seen_inbound_block_requests, peer_ip, Self::INBOUND_BLOCK_REQUEST_INTERVAL) + } + + /// Inserts a solution ID into the cache, returning the previously seen timestamp if it existed. + pub fn insert_inbound_solution(&self, peer_ip: SocketAddr, solution_id: SolutionID) -> Option { + Self::refresh_and_insert(&self.seen_inbound_solutions, (peer_ip, solution_id)) } /// Inserts a transaction ID into the cache, returning the previously seen timestamp if it existed. @@ -117,6 +124,16 @@ impl Cache { } impl Cache { + /// Returns `true` if the cache contains the block request for the given peer. + pub fn contains_inbound_block_request(&self, peer_ip: &SocketAddr) -> bool { + Self::retain(&self.seen_inbound_block_requests, *peer_ip, Self::INBOUND_BLOCK_REQUEST_INTERVAL) > 0 + } + + /// Returns the number of recent block requests for the given peer. + pub fn num_outbound_block_requests(&self, peer_ip: &SocketAddr) -> usize { + self.seen_outbound_block_requests.read().get(peer_ip).map(|r| r.len()).unwrap_or(0) + } + /// Returns `true` if the cache contains the block request for the given peer. 
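The solution and transaction maps above implement a simple seen-before check. A minimal sketch of the idea, with `HashMap` standing in for `LinkedHashMap` and a plain tuple standing in for the `(SocketAddr, SolutionID)` key:

use std::collections::HashMap;
use std::time::Instant;

// Stand-in for `refresh_and_insert` above: inserting a key returns the
// previous timestamp (if any), which callers use as a "seen before" signal
// to decide whether to propagate a solution or transaction.
fn refresh_and_insert<K: std::hash::Hash + Eq>(map: &mut HashMap<K, Instant>, key: K) -> Option<Instant> {
    map.insert(key, Instant::now())
}

fn main() {
    let mut seen = HashMap::new();
    assert!(refresh_and_insert(&mut seen, ("127.0.0.1", 123u64)).is_none()); // first sighting
    assert!(refresh_and_insert(&mut seen, ("127.0.0.1", 123u64)).is_some()); // duplicate
}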
pub fn contains_outbound_block_request(&self, peer_ip: &SocketAddr, request: &BlockRequest) -> bool { self.seen_outbound_block_requests.read().get(peer_ip).map(|r| r.contains(request)).unwrap_or(false) @@ -151,13 +168,9 @@ impl Cache { Self::decrement_counter(&self.seen_outbound_puzzle_requests, peer_ip) } - /// Inserts a solution commitment into the cache, returning the previously seen timestamp if it existed. - pub fn insert_outbound_solution( - &self, - peer_ip: SocketAddr, - solution: PuzzleCommitment, - ) -> Option { - Self::refresh_and_insert(&self.seen_outbound_solutions, (peer_ip, solution)) + /// Inserts a solution ID into the cache, returning the previously seen timestamp if it existed. + pub fn insert_outbound_solution(&self, peer_ip: SocketAddr, solution_id: SolutionID) -> Option { + Self::refresh_and_insert(&self.seen_outbound_solutions, (peer_ip, solution_id)) } /// Inserts a transaction ID into the cache, returning the previously seen timestamp if it existed. @@ -208,6 +221,26 @@ impl Cache { timestamps.len() } + /// Returns the number of recent entries. + fn retain( + map: &RwLock>>, + key: K, + interval_in_secs: i64, + ) -> usize { + // Fetch the current timestamp. + let now = OffsetDateTime::now_utc(); + + let mut map_write = map.write(); + // Load the entry for the key. + let timestamps = map_write.entry(key).or_default(); + // Retain only the timestamps that are within the recent interval. + while timestamps.front().map_or(false, |t| now - *t > Duration::seconds(interval_in_secs)) { + timestamps.pop_front(); + } + // Return the frequency of recent requests. + timestamps.len() + } + /// Increments the key's counter in the map, returning the updated counter. fn increment_counter(map: &RwLock>, key: K) -> u32 { let mut map_write = map.write(); @@ -260,29 +293,50 @@ impl Cache { #[cfg(test)] mod tests { use super::*; - use snarkvm::prelude::Testnet3; + use snarkvm::prelude::MainnetV0; use std::net::Ipv4Addr; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; + + #[test] + fn test_inbound_block_request() { + let cache = Cache::::default(); + let peer_ip = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234); + + // Check that the cache is empty. + assert_eq!(cache.seen_inbound_block_requests.read().len(), 0); + + // Insert a block request. + assert_eq!(cache.insert_inbound_block_request(peer_ip), 1); + + // Check that the cache contains the block request. + assert!(cache.contains_inbound_block_request(&peer_ip)); + + // Insert another block request for the same peer. + assert_eq!(cache.insert_inbound_block_request(peer_ip), 2); + + // Check that the cache contains the block requests. + assert!(cache.contains_inbound_block_request(&peer_ip)); + } #[test] fn test_inbound_solution() { let cache = Cache::::default(); let peer_ip = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234); - let solution = PuzzleCommitment::::default(); + let solution_id = SolutionID::::from(123456789); // Check that the cache is empty. assert_eq!(cache.seen_inbound_solutions.read().len(), 0); // Insert a solution. - assert!(cache.insert_inbound_solution(peer_ip, solution).is_none()); + assert!(cache.insert_inbound_solution(peer_ip, solution_id).is_none()); // Check that the cache contains the solution. assert_eq!(cache.seen_inbound_solutions.read().len(), 1); // Insert the same solution again. - assert!(cache.insert_inbound_solution(peer_ip, solution).is_some()); + assert!(cache.insert_inbound_solution(peer_ip, solution_id).is_some()); // Check that the cache still contains the solution.
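The `retain` helper above implements a sliding-window counter. A self-contained analogue using std `Instant` in place of `OffsetDateTime`:

use std::collections::VecDeque;
use std::time::{Duration, Instant};

// Expired timestamps are dropped from the front of the queue, and the
// remaining length is the recent-request frequency for the key.
fn retain_recent(timestamps: &mut VecDeque<Instant>, interval: Duration) -> usize {
    let now = Instant::now();
    while timestamps.front().map_or(false, |t| now.duration_since(*t) > interval) {
        timestamps.pop_front();
    }
    timestamps.len()
}

fn main() {
    let mut window = VecDeque::new();
    window.push_back(Instant::now());
    assert_eq!(retain_recent(&mut window, Duration::from_secs(60)), 1);
}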
assert_eq!(cache.seen_inbound_solutions.read().len(), 1); @@ -314,19 +368,19 @@ mod tests { fn test_outbound_solution() { let cache = Cache::::default(); let peer_ip = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234); - let solution = PuzzleCommitment::::default(); + let solution_id = SolutionID::::from(123456789); // Check that the cache is empty. assert_eq!(cache.seen_outbound_solutions.read().len(), 0); // Insert a solution. - assert!(cache.insert_outbound_solution(peer_ip, solution).is_none()); + assert!(cache.insert_outbound_solution(peer_ip, solution_id).is_none()); // Check that the cache contains the solution. assert_eq!(cache.seen_outbound_solutions.read().len(), 1); // Insert the same solution again. - assert!(cache.insert_outbound_solution(peer_ip, solution).is_some()); + assert!(cache.insert_outbound_solution(peer_ip, solution_id).is_some()); // Check that the cache still contains the solution. assert_eq!(cache.seen_outbound_solutions.read().len(), 1); diff --git a/node/router/src/inbound.rs b/node/router/src/inbound.rs index 1b0cf0da22..b8d985040b 100644 --- a/node/router/src/inbound.rs +++ b/node/router/src/inbound.rs @@ -30,7 +30,7 @@ use crate::{ use snarkos_node_tcp::protocols::Reading; use snarkvm::prelude::{ block::{Block, Header, Transaction}, - coinbase::{EpochChallenge, ProverSolution}, + puzzle::Solution, Network, }; @@ -39,10 +39,19 @@ use snarkos_node_tcp::is_bogon_ip; use std::{net::SocketAddr, time::Instant}; use tokio::task::spawn_blocking; +/// The max number of peers to send in a `PeerResponse` message. +const MAX_PEERS_TO_SEND: usize = u8::MAX as usize; + +/// The maximum number of blocks the client can be behind its latest peer before it skips +/// processing incoming transactions and solutions. +pub const SYNC_LENIENCY: u32 = 10; + #[async_trait] pub trait Inbound: Reading + Outbound { /// The maximum number of puzzle requests per interval. const MAXIMUM_PUZZLE_REQUESTS_PER_INTERVAL: usize = 5; + /// The maximum number of block requests per interval. + const MAXIMUM_BLOCK_REQUESTS_PER_INTERVAL: usize = 256; /// The duration in seconds to sleep in between ping requests with a connected peer. const PING_SLEEP_IN_SECS: u64 = 20; // 20 seconds /// The time frame to enforce the `MESSAGE_LIMIT`. @@ -72,7 +81,12 @@ pub trait Inbound: Reading + Outbound { match message { Message::BlockRequest(message) => { let BlockRequest { start_height, end_height } = &message; - + // Insert the block request for the peer, and fetch the recent frequency. + let frequency = self.router().cache.insert_inbound_block_request(peer_ip); + // Check if the number of block requests is within the limit. + if frequency > Self::MAXIMUM_BLOCK_REQUESTS_PER_INTERVAL { + bail!("Peer '{peer_ip}' is not following the protocol (excessive block requests)") + } // Ensure the block request is well-formed.
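The new guard above budgets block requests per peer per interval and rejects malformed ranges. An illustrative version returning an error string in place of the `bail!` macro:

const MAXIMUM_BLOCK_REQUESTS_PER_INTERVAL: usize = 256;

// Requests are rejected once a peer exceeds the per-interval budget, and
// empty or inverted ranges are rejected outright.
fn validate_block_request(frequency: usize, start_height: u32, end_height: u32) -> Result<(), String> {
    if frequency > MAXIMUM_BLOCK_REQUESTS_PER_INTERVAL {
        return Err("excessive block requests".into());
    }
    if start_height >= end_height {
        return Err(format!("invalid range ({start_height}..{end_height})"));
    }
    Ok(())
}

fn main() {
    assert!(validate_block_request(1, 10, 20).is_ok());
    assert!(validate_block_request(257, 10, 20).is_err());
    assert!(validate_block_request(1, 20, 20).is_err());
}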
if start_height >= end_height { bail!("Block request from '{peer_ip}' has an invalid range ({start_height}..{end_height})") @@ -122,6 +136,10 @@ pub trait Inbound: Reading + Outbound { if !self.router().cache.contains_outbound_peer_request(peer_ip) { bail!("Peer '{peer_ip}' is not following the protocol (unexpected peer response)") } + self.router().cache.decrement_outbound_peer_requests(peer_ip); + if !self.router().allow_external_peers() { + bail!("Not accepting peer response from '{peer_ip}' (validator gossip is disabled)"); + } match self.peer_response(peer_ip, &message.peers) { true => Ok(()), @@ -195,27 +213,33 @@ pub trait Inbound: Reading + Outbound { Err(error) => bail!("[PuzzleResponse] {error}"), }; // Process the puzzle response. - match self.puzzle_response(peer_ip, message.epoch_challenge, header) { + match self.puzzle_response(peer_ip, message.epoch_hash, header) { true => Ok(()), false => bail!("Peer '{peer_ip}' sent an invalid puzzle response"), } } Message::UnconfirmedSolution(message) => { - // Clone the serialized message. - let serialized = message.clone(); + // Do not process unconfirmed solutions if the node is too far behind. + if self.num_blocks_behind() > SYNC_LENIENCY { + trace!("Skipped processing unconfirmed solution '{}' (node is syncing)", message.solution_id); + return Ok(()); + } // Update the timestamp for the unconfirmed solution. let seen_before = self.router().cache.insert_inbound_solution(peer_ip, message.solution_id).is_some(); // Determine whether to propagate the solution. if seen_before { - bail!("Skipping 'UnconfirmedSolution' from '{peer_ip}'") + trace!("Skipping 'UnconfirmedSolution' from '{peer_ip}'"); + return Ok(()); } + // Clone the serialized message. + let serialized = message.clone(); // Perform the deferred non-blocking deserialization of the solution. let solution = match message.solution.deserialize().await { Ok(solution) => solution, Err(error) => bail!("[UnconfirmedSolution] {error}"), }; // Check that the solution parameters match. - if message.solution_id != solution.commitment() { + if message.solution_id != solution.id() { bail!("Peer '{peer_ip}' is not following the 'UnconfirmedSolution' protocol") } // Handle the unconfirmed solution. @@ -225,15 +249,21 @@ pub trait Inbound: Reading + Outbound { } } Message::UnconfirmedTransaction(message) => { - // Clone the serialized message. - let serialized = message.clone(); + // Do not process unconfirmed transactions if the node is too far behind. + if self.num_blocks_behind() > SYNC_LENIENCY { + trace!("Skipped processing unconfirmed transaction '{}' (node is syncing)", message.transaction_id); + return Ok(()); + } // Update the timestamp for the unconfirmed transaction. let seen_before = self.router().cache.insert_inbound_transaction(peer_ip, message.transaction_id).is_some(); // Determine whether to propagate the transaction. if seen_before { - bail!("Skipping 'UnconfirmedTransaction' from '{peer_ip}'") + trace!("Skipping 'UnconfirmedTransaction' from '{peer_ip}'"); + return Ok(()); } + // Clone the serialized message. + let serialized = message.clone(); // Perform the deferred non-blocking deserialization of the transaction. let transaction = match message.transaction.deserialize().await { Ok(transaction) => transaction, @@ -266,13 +296,13 @@ pub trait Inbound: Reading + Outbound { let peers = match self.router().is_dev() { // In development mode, relax the validity requirements to make operating devnets more flexible. 
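The `SYNC_LENIENCY` gate above reduces to a single predicate; `num_blocks_behind` is the new `Outbound` trait method shown further below:

pub const SYNC_LENIENCY: u32 = 10;

// Unconfirmed solutions and transactions are only processed once the node is
// within SYNC_LENIENCY blocks of its best-known peer height.
fn should_process_unconfirmed(num_blocks_behind: u32) -> bool {
    num_blocks_behind <= SYNC_LENIENCY
}

fn main() {
    assert!(should_process_unconfirmed(10));  // at the leniency bound: process
    assert!(!should_process_unconfirmed(11)); // too far behind: skip quietly
}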
true => { - peers.into_iter().filter(|ip| *ip != peer_ip && !is_bogon_ip(ip.ip())).take(u8::MAX as usize).collect() + peers.into_iter().filter(|ip| *ip != peer_ip && !is_bogon_ip(ip.ip())).take(MAX_PEERS_TO_SEND).collect() } // In production mode, ensure the peer IPs are valid. false => peers .into_iter() .filter(|ip| *ip != peer_ip && self.router().is_valid_peer_ip(ip)) - .take(u8::MAX as usize) + .take(MAX_PEERS_TO_SEND) .collect(), }; // Send a `PeerResponse` message to the peer. @@ -282,6 +312,10 @@ /// Handles a `PeerResponse` message. fn peer_response(&self, _peer_ip: SocketAddr, peers: &[SocketAddr]) -> bool { + // Ensure the number of peers received does not exceed MAX_PEERS_TO_SEND. + if peers.len() > MAX_PEERS_TO_SEND { + return false; + } // Filter out invalid addresses. let peers = match self.router().is_dev() { // In development mode, relax the validity requirements to make operating devnets more flexible. @@ -304,14 +338,14 @@ fn puzzle_request(&self, peer_ip: SocketAddr) -> bool; /// Handles a `PuzzleResponse` message. - fn puzzle_response(&self, peer_ip: SocketAddr, _challenge: EpochChallenge, _header: Header) -> bool; + fn puzzle_response(&self, peer_ip: SocketAddr, _epoch_hash: N::BlockHash, _header: Header) -> bool; /// Handles an `UnconfirmedSolution` message. async fn unconfirmed_solution( &self, peer_ip: SocketAddr, serialized: UnconfirmedSolution, - solution: ProverSolution, + solution: Solution, ) -> bool; /// Handles an `UnconfirmedTransaction` message. diff --git a/node/router/src/lib.rs b/node/router/src/lib.rs index 6f4465aaca..3703ea6002 100644 --- a/node/router/src/lib.rs +++ b/node/router/src/lib.rs @@ -93,6 +93,8 @@ pub struct InnerRouter { restricted_peers: RwLock>, /// The spawned handles. handles: Mutex>>, + /// If the flag is set, the node will engage in P2P gossip to request more peers. + allow_external_peers: bool, /// The boolean flag for the development mode. is_dev: bool, } @@ -115,6 +117,7 @@ impl Router { account: Account, trusted_peers: &[SocketAddr], max_peers: u16, + allow_external_peers: bool, is_dev: bool, ) -> Result { // Initialize the TCP stack. @@ -132,6 +135,7 @@ candidate_peers: Default::default(), restricted_peers: Default::default(), handles: Default::default(), + allow_external_peers, is_dev, }))) } @@ -251,6 +255,11 @@ impl Router { self.is_dev } + /// Returns `true` if the node is engaging in P2P gossip to request more peers. + pub fn allow_external_peers(&self) -> bool { + self.allow_external_peers + } + /// Returns the listener IP address from the (ambiguous) peer address. pub fn resolve_to_listener(&self, peer_addr: &SocketAddr) -> Option { self.resolver.get_listener(peer_addr) @@ -295,6 +304,11 @@ impl Router { .unwrap_or(false) } + /// Returns `true` if the given IP is trusted. + pub fn is_trusted(&self, ip: &SocketAddr) -> bool { + self.trusted_peers.contains(ip) + } + /// Returns the maximum number of connected peers. pub fn max_connected_peers(&self) -> usize { self.tcp.config().max_connections as usize @@ -376,16 +390,27 @@ impl Router { } /// Returns the list of bootstrap peers. + #[allow(clippy::if_same_then_else)] pub fn bootstrap_peers(&self) -> Vec { if cfg!(feature = "test") || self.is_dev { + // Development testing contains no bootstrap peers. vec![] - } else { + } else if N::ID == snarkvm::console::network::MainnetV0::ID { + // Mainnet contains the following bootstrap peers.
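Taken together, the gossip changes above amount to two checks. A condensed sketch (the struct layout here is illustrative; in the real code the size check lives in `peer_response` and the gossip flag is checked in the inbound handler):

use std::net::SocketAddr;

const MAX_PEERS_TO_SEND: usize = u8::MAX as usize;

struct GossipPolicy {
    allow_external_peers: bool,
}

impl GossipPolicy {
    // A PeerResponse is only accepted if gossip is enabled and the peer did
    // not send more addresses than this node would ever request.
    fn accept_peer_response(&self, peers: &[SocketAddr]) -> bool {
        self.allow_external_peers && peers.len() <= MAX_PEERS_TO_SEND
    }
}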
vec![ - SocketAddr::from_str("35.224.50.150:4133").unwrap(), - SocketAddr::from_str("35.227.159.141:4133").unwrap(), - SocketAddr::from_str("34.139.203.87:4133").unwrap(), - SocketAddr::from_str("34.150.221.166:4133").unwrap(), + // TODO: Populate me with Mainnet Beta IP addresses. ] + } else if N::ID == snarkvm::console::network::TestnetV0::ID { + // Testnet contains the following bootstrap peers. + vec![ + SocketAddr::from_str("34.168.118.156:4130").unwrap(), + SocketAddr::from_str("35.231.152.213:4130").unwrap(), + SocketAddr::from_str("34.17.53.129:4130").unwrap(), + SocketAddr::from_str("35.200.149.162:4130").unwrap(), + ] + } else { + // Unrecognized networks contain no bootstrap peers. + vec![] } } diff --git a/node/router/src/outbound.rs b/node/router/src/outbound.rs index 20713962c9..716a745086 100644 --- a/node/router/src/outbound.rs +++ b/node/router/src/outbound.rs @@ -28,6 +28,12 @@ pub trait Outbound: Writing> { /// Returns a reference to the router. fn router(&self) -> &Router; + /// Returns `true` if the node is synced up to the latest block (within the given tolerance). + fn is_block_synced(&self) -> bool; + + /// Returns the number of blocks this node is behind the greatest peer height. + fn num_blocks_behind(&self) -> u32; + /// Sends a "Ping" message to the given peer. fn send_ping(&self, peer_ip: SocketAddr, block_locators: Option>) { self.send(peer_ip, Message::Ping(Ping::new(self.router().node_type(), block_locators))); diff --git a/node/router/src/routing.rs b/node/router/src/routing.rs index 63715124f9..d2d2457c62 100644 --- a/node/router/src/routing.rs +++ b/node/router/src/routing.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{messages::Message, Heartbeat, Inbound, Outbound}; +use crate::{Heartbeat, Inbound, Outbound}; use snarkos_node_tcp::{ protocols::{Disconnect, Handshake, OnConnect}, P2P, @@ -37,9 +37,6 @@ pub trait Routing: self.enable_listener().await; // Initialize the heartbeat. self.initialize_heartbeat(); - // Initialize the report. - #[cfg(not(feature = "test"))] - self.initialize_report(); } // Start listening for inbound connections. @@ -59,24 +56,4 @@ pub trait Routing: } }); } - - /// Initialize a new instance of the report. - fn initialize_report(&self) { - let self_clone = self.clone(); - self.router().spawn(async move { - loop { - // Prepare the report. - let mut report = std::collections::HashMap::new(); - report.insert("message_version".to_string(), Message::::VERSION.to_string()); - report.insert("node_address".to_string(), self_clone.router().address().to_string()); - report.insert("node_type".to_string(), self_clone.router().node_type().to_string()); - report.insert("is_dev".to_string(), self_clone.router().is_dev().to_string()); - // Transmit the report. - let url = "https://vm.aleo.org/testnet3/report"; - let _ = reqwest::Client::new().post(url).json(&report).send().await; - // Sleep for a fixed duration in seconds. 
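A simplified sketch of the per-network bootstrap selection introduced above. The numeric IDs used here (0 = MainnetV0, 1 = TestnetV0) are assumptions; the real code compares `N::ID` against snarkVM's network constants, and the testnet address is taken from the list above:

use std::net::SocketAddr;
use std::str::FromStr;

fn bootstrap_peers(network_id: u16, is_dev: bool) -> Vec<SocketAddr> {
    if is_dev {
        // Development networks rely on trusted peers instead of bootstrap nodes.
        vec![]
    } else if network_id == 1 {
        vec![SocketAddr::from_str("34.168.118.156:4130").unwrap()]
    } else {
        // Mainnet addresses are still to be populated; unknown networks get none.
        vec![]
    }
}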
- tokio::time::sleep(Duration::from_secs(6 * 60 * 60)).await; - } - }); - } } diff --git a/node/router/tests/cleanups.rs b/node/router/tests/cleanups.rs index 587b0db2ff..3a8b8ed83f 100644 --- a/node/router/tests/cleanups.rs +++ b/node/router/tests/cleanups.rs @@ -43,7 +43,7 @@ async fn test_connection_cleanups() { let node = match rng.gen_range(0..3) % 3 { 0 => client(0, 1).await, 1 => prover(0, 1).await, - 2 => validator(0, 1).await, + 2 => validator(0, 1, &[], true).await, _ => unreachable!(), }; diff --git a/node/router/tests/common/mod.rs b/node/router/tests/common/mod.rs index 9807dc4100..b42b0a2063 100644 --- a/node/router/tests/common/mod.rs +++ b/node/router/tests/common/mod.rs @@ -24,7 +24,7 @@ use std::{ use snarkos_account::Account; use snarkos_node_router::{messages::NodeType, Router}; -use snarkvm::prelude::{block::Block, FromBytes, Network, Testnet3 as CurrentNetwork}; +use snarkvm::prelude::{block::Block, FromBytes, MainnetV0 as CurrentNetwork, Network}; /// A helper macro to print the TCP listening address, along with the connected and connecting peers. #[macro_export] @@ -78,6 +78,7 @@ pub async fn client(listening_port: u16, max_peers: u16) -> TestRouter TestRouter TestRouter TestRouter { +pub async fn validator( + listening_port: u16, + max_peers: u16, + trusted_peers: &[SocketAddr], + allow_external_peers: bool, +) -> TestRouter { Router::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), listening_port), NodeType::Validator, sample_account(), - &[], + trusted_peers, max_peers, + allow_external_peers, true, ) .await diff --git a/node/router/tests/common/router.rs b/node/router/tests/common/router.rs index 0ca9f721d4..44a6f3c74e 100644 --- a/node/router/tests/common/router.rs +++ b/node/router/tests/common/router.rs @@ -39,7 +39,7 @@ use snarkos_node_tcp::{ }; use snarkvm::prelude::{ block::{Block, Header, Transaction}, - coinbase::{EpochChallenge, ProverSolution}, + puzzle::Solution, Network, }; @@ -149,6 +149,16 @@ impl Outbound for TestRouter { fn router(&self) -> &Router { &self.0 } + + /// Returns `true` if the node is synced up to the latest block (within the given tolerance). + fn is_block_synced(&self) -> bool { + true + } + + /// Returns the number of blocks this node is behind the greatest peer height. + fn num_blocks_behind(&self) -> u32 { + 0 + } } #[async_trait] @@ -179,7 +189,7 @@ impl Inbound for TestRouter { } /// Handles an `PuzzleResponse` message. - fn puzzle_response(&self, _peer_ip: SocketAddr, _epoch_challenge: EpochChallenge, _header: Header) -> bool { + fn puzzle_response(&self, _peer_ip: SocketAddr, _epoch_hash: N::BlockHash, _header: Header) -> bool { true } @@ -188,7 +198,7 @@ impl Inbound for TestRouter { &self, _peer_ip: SocketAddr, _serialized: UnconfirmedSolution, - _solution: ProverSolution, + _solution: Solution, ) -> bool { true } diff --git a/node/router/tests/connect.rs b/node/router/tests/connect.rs index 8264ab93d2..60c9b15449 100644 --- a/node/router/tests/connect.rs +++ b/node/router/tests/connect.rs @@ -22,7 +22,7 @@ use core::time::Duration; #[tokio::test] async fn test_connect_without_handshake() { // Create 2 routers. - let node0 = validator(0, 2).await; + let node0 = validator(0, 2, &[], true).await; let node1 = client(0, 2).await; assert_eq!(node0.number_of_connected_peers(), 0); assert_eq!(node1.number_of_connected_peers(), 0); @@ -78,7 +78,7 @@ async fn test_connect_without_handshake() { #[tokio::test] async fn test_connect_with_handshake() { // Create 2 routers. 
- let node0 = validator(0, 2).await; + let node0 = validator(0, 2, &[], true).await; let node1 = client(0, 2).await; assert_eq!(node0.number_of_connected_peers(), 0); assert_eq!(node1.number_of_connected_peers(), 0); @@ -150,11 +150,64 @@ async fn test_connect_with_handshake() { } } +#[tokio::test] +async fn test_validator_connection() { + // Create first router and start listening. + let node0 = validator(0, 2, &[], false).await; + assert_eq!(node0.number_of_connected_peers(), 0); + node0.enable_handshake().await; + node0.tcp().enable_listener().await.unwrap(); + + // Get the local IP address from the first router. + let addr0 = node0.local_ip(); + + // Create second router, trusting the first router, and start listening. + let node1 = validator(0, 2, &[addr0], false).await; + assert_eq!(node1.number_of_connected_peers(), 0); + node1.enable_handshake().await; + node1.tcp().enable_listener().await.unwrap(); + + { + // Connect node0 to node1. + node0.connect(node1.local_ip()); + // Sleep briefly. + tokio::time::sleep(Duration::from_millis(200)).await; + + print_tcp!(node0); + print_tcp!(node1); + + // Check the TCP level - connection was accepted. + assert_eq!(node0.tcp().num_connected(), 1); + assert_eq!(node1.tcp().num_connected(), 1); + + // Check the router level - connection was accepted. + assert_eq!(node0.number_of_connected_peers(), 1); + assert_eq!(node1.number_of_connected_peers(), 1); + + // Disconnect the nodes. + node0.disconnect(node1.local_ip()); + node1.disconnect(node0.local_ip()); + + // Connect node1 to node0. + node1.connect(node0.local_ip()); + // Sleep briefly. + tokio::time::sleep(Duration::from_millis(200)).await; + + // Check the TCP level - connection was not accepted. + assert_eq!(node0.tcp().num_connected(), 0); + assert_eq!(node1.tcp().num_connected(), 0); + + // Check the router level - connection was not accepted. + assert_eq!(node0.number_of_connected_peers(), 0); + assert_eq!(node1.number_of_connected_peers(), 0); + } +} + #[ignore] #[tokio::test] async fn test_connect_simultaneously_with_handshake() { // Create 2 routers. - let node0 = validator(0, 2).await; + let node0 = validator(0, 2, &[], true).await; let node1 = client(0, 2).await; assert_eq!(node0.number_of_connected_peers(), 0); assert_eq!(node1.number_of_connected_peers(), 0); diff --git a/node/router/tests/disconnect.rs b/node/router/tests/disconnect.rs index bbcb6c8e33..df9eee1016 100644 --- a/node/router/tests/disconnect.rs +++ b/node/router/tests/disconnect.rs @@ -22,7 +22,7 @@ use core::time::Duration; #[tokio::test] async fn test_disconnect_without_handshake() { // Create 2 routers. - let node0 = validator(0, 1).await; + let node0 = validator(0, 1, &[], true).await; let node1 = client(0, 1).await; assert_eq!(node0.number_of_connected_peers(), 0); assert_eq!(node1.number_of_connected_peers(), 0); @@ -64,7 +64,7 @@ async fn test_disconnect_without_handshake() { #[tokio::test] async fn test_disconnect_with_handshake() { // Create 2 routers. 
- let node0 = validator(0, 1).await; + let node0 = validator(0, 1, &[], true).await; let node1 = client(0, 1).await; assert_eq!(node0.number_of_connected_peers(), 0); assert_eq!(node1.number_of_connected_peers(), 0); diff --git a/node/src/client/mod.rs b/node/src/client/mod.rs index a26f4e5510..f098f0af3e 100644 --- a/node/src/client/mod.rs +++ b/node/src/client/mod.rs @@ -35,7 +35,7 @@ use snarkvm::{ console::network::Network, ledger::{ block::{Block, Header}, - coinbase::{CoinbasePuzzle, EpochChallenge, ProverSolution}, + puzzle::{Puzzle, Solution}, store::ConsensusStorage, Ledger, }, @@ -64,8 +64,8 @@ pub struct Client> { sync: Arc>, /// The genesis block. genesis: Block, - /// The coinbase puzzle. - coinbase_puzzle: CoinbasePuzzle, + /// The puzzle. + puzzle: Puzzle, /// The spawned handles. handles: Arc>>>, /// The shutdown signal. @@ -83,17 +83,14 @@ impl> Client { genesis: Block, cdn: Option, storage_mode: StorageMode, + shutdown: Arc, ) -> Result { - // Prepare the shutdown flag. - let shutdown: Arc = Default::default(); - // Initialize the signal handler. let signal_node = Self::handle_signals(shutdown.clone()); // Initialize the ledger. let ledger = Ledger::::load(genesis.clone(), storage_mode.clone())?; - // TODO: Remove me after Phase 3. - let ledger = crate::phase_3_reset(ledger, storage_mode.clone())?; + // Initialize the CDN. if let Some(base_url) = cdn { // Sync the ledger with the CDN. @@ -109,6 +106,8 @@ impl> Client { let ledger_service = Arc::new(CoreLedgerService::::new(ledger.clone(), shutdown.clone())); // Initialize the sync module. let sync = BlockSync::new(BlockSyncMode::Router, ledger_service.clone()); + // Determine if the client should allow external peers. + let allow_external_peers = true; // Initialize the node router. let router = Router::new( @@ -117,11 +116,10 @@ impl> Client { account, trusted_peers, Self::MAXIMUM_NUMBER_OF_PEERS as u16, + allow_external_peers, matches!(storage_mode, StorageMode::Development(_)), ) .await?; - // Load the coinbase puzzle. - let coinbase_puzzle = CoinbasePuzzle::::load()?; // Initialize the node. let mut node = Self { ledger: ledger.clone(), @@ -129,7 +127,7 @@ impl> Client { rest: None, sync: Arc::new(sync), genesis, - coinbase_puzzle, + puzzle: ledger.puzzle().clone(), handles: Default::default(), shutdown, }; diff --git a/node/src/client/router.rs b/node/src/client/router.rs index 6123f75e32..53f7356f8e 100644 --- a/node/src/client/router.rs +++ b/node/src/client/router.rs @@ -163,6 +163,16 @@ impl> Outbound for Client { fn router(&self) -> &Router { &self.router } + + /// Returns `true` if the node is synced up to the latest block (within the given tolerance). + fn is_block_synced(&self) -> bool { + self.sync.is_block_synced() + } + + /// Returns the number of blocks this node is behind the greatest peer height. + fn num_blocks_behind(&self) -> u32 { + self.sync.num_blocks_behind() + } } #[async_trait] @@ -235,11 +245,11 @@ impl> Inbound for Client { true } - /// Disconnects on receipt of a `PuzzleRequest` message. + /// Retrieves the latest epoch hash and latest block header, and returns the puzzle response to the peer. fn puzzle_request(&self, peer_ip: SocketAddr) -> bool { - // Retrieve the latest epoch challenge. - let epoch_challenge = match self.ledger.latest_epoch_challenge() { - Ok(epoch_challenge) => epoch_challenge, + // Retrieve the latest epoch hash. 
+ let epoch_hash = match self.ledger.latest_epoch_hash() { + Ok(epoch_hash) => epoch_hash, Err(error) => { error!("Failed to prepare a puzzle request for '{peer_ip}': {error}"); return false; @@ -248,12 +258,12 @@ // Retrieve the latest block header. let block_header = Data::Object(self.ledger.latest_header()); // Send the `PuzzleResponse` message to the peer. - Outbound::send(self, peer_ip, Message::PuzzleResponse(PuzzleResponse { epoch_challenge, block_header })); + Outbound::send(self, peer_ip, Message::PuzzleResponse(PuzzleResponse { epoch_hash, block_header })); true } - /// Saves the latest epoch challenge and latest block header in the node. - fn puzzle_response(&self, peer_ip: SocketAddr, _epoch_challenge: EpochChallenge, _header: Header) -> bool { + /// Saves the latest epoch hash and latest block header in the node. + fn puzzle_response(&self, peer_ip: SocketAddr, _epoch_hash: N::BlockHash, _header: Header) -> bool { debug!("Disconnecting '{peer_ip}' for the following reason - {:?}", DisconnectReason::ProtocolViolation); false } @@ -263,30 +273,33 @@ &self, peer_ip: SocketAddr, serialized: UnconfirmedSolution, - solution: ProverSolution, + solution: Solution, ) -> bool { - // Retrieve the latest epoch challenge. - if let Ok(epoch_challenge) = self.ledger.latest_epoch_challenge() { + // Retrieve the latest epoch hash. + if let Ok(epoch_hash) = self.ledger.latest_epoch_hash() { // Retrieve the latest proof target. let proof_target = self.ledger.latest_block().header().proof_target(); - // Ensure that the prover solution is valid for the given epoch. - let coinbase_puzzle = self.coinbase_puzzle.clone(); - let is_valid = tokio::task::spawn_blocking(move || { - solution.verify(coinbase_puzzle.coinbase_verifying_key(), &epoch_challenge, proof_target) - }) - .await; + // Ensure that the solution is valid for the given epoch. + let puzzle = self.puzzle.clone(); + let is_valid = + tokio::task::spawn_blocking(move || puzzle.check_solution(&solution, epoch_hash, proof_target)).await; match is_valid { // If the solution is valid, propagate the `UnconfirmedSolution`. - Ok(Ok(true)) => { + Ok(Ok(())) => { let message = Message::UnconfirmedSolution(serialized); // Propagate the "UnconfirmedSolution". self.propagate(message, &[peer_ip]); } - Ok(Ok(false)) | Ok(Err(_)) => { - trace!("Invalid prover solution '{}' for the proof target.", solution.commitment()) + Ok(Err(_)) => { + trace!("Invalid solution '{}' for the proof target.", solution.id()) + } + // If an error occurs after the first 10 blocks of the epoch, log it as a warning; otherwise, ignore it. + Err(error) => { + if self.ledger.latest_height() % N::NUM_BLOCKS_PER_EPOCH > 10 { + warn!("Failed to verify the solution - {error}") + } } - Err(error) => warn!("Failed to verify the prover solution: {error}"), } } true diff --git a/node/src/lib.rs b/node/src/lib.rs index aa84031492..4d7dfde7a7 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -60,156 +60,15 @@ pub fn log_clean_error(storage_mode: &StorageMode) { } } -use snarkvm::{ - ledger::store::ConsensusStorage, - prelude::{const_assert, hrp2, AleoID, Field, Ledger, Network}, -}; - -use anyhow::{bail, Result}; - -// TODO: Remove me after Phase 3. -pub fn phase_3_reset>( - ledger: Ledger, - storage_mode: StorageMode, -) -> Result> { - use core::str::FromStr; - - /// Removes the specified ledger from storage. - pub(crate) fn remove_ledger(network: u16, storage_mode: &StorageMode) -> Result { - // Construct the path to the ledger in storage.
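The verification pattern above offloads CPU-bound puzzle checking onto tokio's blocking pool so the async executor stays responsive. A minimal sketch with a hypothetical `check_solution` stand-in; the epoch-grace rule mirrors the comment above, on the assumption that failures early in an epoch are expected while peers converge on the new epoch hash:

// Stand-in for `puzzle.check_solution(&solution, epoch_hash, proof_target)`.
fn check_solution(_solution: u64) -> Result<(), String> {
    Ok(())
}

async fn verify_and_propagate(solution: u64, blocks_into_epoch: u32) -> bool {
    match tokio::task::spawn_blocking(move || check_solution(solution)).await {
        Ok(Ok(())) => true,  // valid: propagate the UnconfirmedSolution
        Ok(Err(_)) => false, // invalid for the current proof target
        Err(_join_error) => {
            // Only warn once the epoch is more than 10 blocks old.
            if blocks_into_epoch > 10 {
                eprintln!("Failed to verify the solution");
            }
            false
        }
    }
}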
- let mut path = aleo_std::aleo_ledger_dir(network, storage_mode.clone()); - - // Delete the parent folder. - path.pop(); - - // Prepare the path string. - let path_string = format!("(in \"{}\")", path.display()); - - // Check if the path to the ledger exists in storage. - if path.exists() { - // Remove the ledger files from storage. - match std::fs::remove_dir_all(&path) { - Ok(_) => Ok(format!("✅ Cleaned the snarkOS node storage {path_string}")), - Err(error) => { - bail!("Failed to remove the snarkOS node storage {path_string}\n{}", error.to_string()) - } - } - } else { - Ok(format!("✅ No snarkOS node storage was found {path_string}")) - } - } - - type ID = AleoID, { hrp2!("ab") }>; - - if let Ok(block) = ledger.get_block(28250) { - if *block.hash() == *ID::::from_str("ab1fxetqjm0ppruay8vlg6gtt52d5fkeydmrk0talp04ymjm65acg9sh8d0r5")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(28251) { - if *block.hash() == *ID::::from_str("ab1ngmc9wf3kz73lxg9ylx75vday82a26xqthjykzrwyhngnr25uvqqau9eyh")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(28252) { - if *block.hash() == *ID::::from_str("ab1k6msq00mzrlmm3e0xzgynks5mqh2zrhd35akqqts24sd9u5x9yxs355qgv")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(115314) { - if *block.hash() == *ID::::from_str("ab13eckyhvhpv5zdhw8xz2zskrmm0a5hgeq7f5sjaw4errx0678pgpsjhuaqf")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(115315) { - if *block.hash() == *ID::::from_str("ab1axs5ltm6kjezsjxw35taf3xjpherrhpu6868h3ezhc3ap8pyrggqrrkjcg")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(726845) { - if *block.hash() == *ID::::from_str("ab1tf3v9qef0uh3ygsc0qqem7dzeyy2m8aqz583a80z60l8t5l22u9s84y38z")? 
{ - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(997810) { - if *block.hash() == *ID::::from_str("ab1pap9sxh5fcskw7l3msax4fq2mrqd80kxp0epx9dguxua2e8dacys78key5")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } else if let Ok(block) = ledger.get_block(997810) { - if *block.hash() == *ID::::from_str("ab1fx4mpz0fdqx75djf3n9grsjkc229xfs8fzmjqsxkajtj8j8sdurqufgvyz")? { - let genesis = ledger.get_block(0)?; - drop(ledger); - println!( - "{}.\n\n\nMIGRATION SUCCEEDED. RESTART THIS SNARKOS NODE AGAIN.\n\n", - remove_ledger(N::ID, &storage_mode)? - ); - // Sleep for 5 seconds to allow the user to read the message. - std::thread::sleep(std::time::Duration::from_secs(5)); - return Ledger::::load(genesis.clone(), storage_mode); - } - } - Ok(ledger) -} - /// Starts the notification message loop. pub fn start_notification_message_loop() -> tokio::task::JoinHandle<()> { - let mut interval = tokio::time::interval(std::time::Duration::from_secs(180)); + // let mut interval = tokio::time::interval(std::time::Duration::from_secs(180)); tokio::spawn(async move { - loop { - interval.tick().await; - // TODO (howardwu): Swap this with the official message for Testnet 3 announcements. - // info!("{}", notification_message()); - } + // loop { + // interval.tick().await; + // // TODO (howardwu): Swap this with the official message for announcements. + // // info!("{}", notification_message()); + // } }) } @@ -222,7 +81,7 @@ pub fn notification_message() -> String { ================================================================================================== - 🚧 Welcome to Aleo Testnet 3 Phase 3 - Calibration Period 🚧 + 🚧 Welcome to Aleo - Calibration Period 🚧 ================================================================================================== @@ -249,7 +108,7 @@ pub fn notification_message() -> String { - IMPORTANT: EXPECT MULTIPLE NETWORK RESETS. - If participating, BE PREPARED TO RESET YOUR NODE AT ANY TIME. - When a reset occurs, RUN THE FOLLOWING TO RESET YOUR NODE: - - git checkout testnet3 && git pull + - git checkout mainnet && git pull - cargo install --locked --path . - snarkos clean - snarkos start --nodisplay --client diff --git a/node/src/node.rs b/node/src/node.rs index e6435d6ef6..f5fe6d2faa 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -26,7 +26,10 @@ use snarkvm::prelude::{ use aleo_std::StorageMode; use anyhow::Result; -use std::{net::SocketAddr, sync::Arc}; +use std::{ + net::SocketAddr, + sync::{atomic::AtomicBool, Arc}, +}; pub enum Node { /// A validator is a full node, capable of validating blocks. 
@@ -50,6 +53,9 @@ impl Node { genesis: Block, cdn: Option, storage_mode: StorageMode, + allow_external_peers: bool, + dev_txs: bool, + shutdown: Arc, ) -> Result { Ok(Self::Validator(Arc::new( Validator::new( @@ -63,6 +69,9 @@ impl Node { genesis, cdn, storage_mode, + allow_external_peers, + dev_txs, + shutdown, ) .await?, ))) @@ -75,8 +84,9 @@ impl Node { trusted_peers: &[SocketAddr], genesis: Block, storage_mode: StorageMode, + shutdown: Arc, ) -> Result { - Ok(Self::Prover(Arc::new(Prover::new(node_ip, account, trusted_peers, genesis, storage_mode).await?))) + Ok(Self::Prover(Arc::new(Prover::new(node_ip, account, trusted_peers, genesis, storage_mode, shutdown).await?))) } /// Initializes a new client node. @@ -89,9 +99,11 @@ impl Node { genesis: Block, cdn: Option, storage_mode: StorageMode, + shutdown: Arc, ) -> Result { Ok(Self::Client(Arc::new( - Client::new(node_ip, rest_ip, rest_rps, account, trusted_peers, genesis, cdn, storage_mode).await?, + Client::new(node_ip, rest_ip, rest_rps, account, trusted_peers, genesis, cdn, storage_mode, shutdown) + .await?, ))) } diff --git a/node/src/prover/mod.rs b/node/src/prover/mod.rs index 0872a35fd7..2286ed0370 100644 --- a/node/src/prover/mod.rs +++ b/node/src/prover/mod.rs @@ -34,10 +34,11 @@ use snarkvm::{ ledger::narwhal::Data, prelude::{ block::{Block, Header}, - coinbase::{CoinbasePuzzle, EpochChallenge, ProverSolution}, + puzzle::{Puzzle, Solution}, store::ConsensusStorage, Network, }, + synthesizer::VM, }; use aleo_std::StorageMode; @@ -46,6 +47,7 @@ use colored::Colorize; use core::{marker::PhantomData, time::Duration}; use parking_lot::{Mutex, RwLock}; use rand::{rngs::OsRng, CryptoRng, Rng}; +use snarkos_node_bft::helpers::fmt_id; use std::{ net::SocketAddr, sync::{ @@ -64,10 +66,10 @@ pub struct Prover> { sync: Arc>, /// The genesis block. genesis: Block, - /// The coinbase puzzle. - coinbase_puzzle: CoinbasePuzzle, - /// The latest epoch challenge. - latest_epoch_challenge: Arc>>>>, + /// The puzzle. + puzzle: Puzzle, + /// The latest epoch hash. + latest_epoch_hash: Arc>>, /// The latest block header. latest_block_header: Arc>>>, /// The number of puzzle instances. @@ -90,10 +92,8 @@ impl> Prover { trusted_peers: &[SocketAddr], genesis: Block, storage_mode: StorageMode, + shutdown: Arc, ) -> Result { - // Prepare the shutdown flag. - let shutdown: Arc = Default::default(); - // Initialize the signal handler. let signal_node = Self::handle_signals(shutdown.clone()); @@ -101,6 +101,8 @@ impl> Prover { let ledger_service = Arc::new(ProverLedgerService::new()); // Initialize the sync module. let sync = BlockSync::new(BlockSyncMode::Router, ledger_service.clone()); + // Determine if the prover should allow external peers. + let allow_external_peers = true; // Initialize the node router. let router = Router::new( @@ -109,11 +111,10 @@ impl> Prover { account, trusted_peers, Self::MAXIMUM_NUMBER_OF_PEERS as u16, + allow_external_peers, matches!(storage_mode, StorageMode::Development(_)), ) .await?; - // Load the coinbase puzzle. - let coinbase_puzzle = CoinbasePuzzle::::load()?; // Compute the maximum number of puzzle instances. let max_puzzle_instances = num_cpus::get().saturating_sub(2).clamp(1, 6); // Initialize the node. 
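The prover's concurrency cap above, restated as a standalone function: two cores are left free (presumably for networking and message handling), with the instance count clamped to [1, 6]:

fn max_puzzle_instances(num_cpus: usize) -> usize {
    num_cpus.saturating_sub(2).clamp(1, 6)
}

fn main() {
    assert_eq!(max_puzzle_instances(1), 1);  // small machines still prove
    assert_eq!(max_puzzle_instances(4), 2);
    assert_eq!(max_puzzle_instances(16), 6); // capped regardless of core count
}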
@@ -121,8 +122,8 @@ impl> Prover { router, sync: Arc::new(sync), genesis, - coinbase_puzzle, - latest_epoch_challenge: Default::default(), + puzzle: VM::::new_puzzle()?, + latest_epoch_hash: Default::default(), latest_block_header: Default::default(), puzzle_instances: Default::default(), max_puzzle_instances: u8::try_from(max_puzzle_instances)?, @@ -132,8 +133,8 @@ }; // Initialize the routing. node.initialize_routing().await; - // Initialize the coinbase puzzle. - node.initialize_coinbase_puzzle().await; + // Initialize the puzzle. + node.initialize_puzzle().await; // Initialize the notification message loop. node.handles.lock().push(crate::start_notification_message_loop()); // Pass the node to the signal handler. @@ -149,12 +150,12 @@ impl> NodeInterface for Prover { async fn shut_down(&self) { info!("Shutting down..."); - // Shut down the coinbase puzzle. - trace!("Shutting down the coinbase puzzle..."); + // Shut down the puzzle. + debug!("Shutting down the puzzle..."); self.shutdown.store(true, Ordering::Relaxed); // Abort the tasks. - trace!("Shutting down the prover..."); + debug!("Shutting down the prover..."); self.handles.lock().iter().for_each(|handle| handle.abort()); // Shut down the router. @@ -165,35 +166,35 @@ } impl> Prover { - /// Initialize a new instance of the coinbase puzzle. - async fn initialize_coinbase_puzzle(&self) { + /// Initialize a new instance of the puzzle. + async fn initialize_puzzle(&self) { for _ in 0..self.max_puzzle_instances { let prover = self.clone(); self.handles.lock().push(tokio::spawn(async move { - prover.coinbase_puzzle_loop().await; + prover.puzzle_loop().await; })); } } - /// Executes an instance of the coinbase puzzle. - async fn coinbase_puzzle_loop(&self) { + /// Executes an instance of the puzzle. + async fn puzzle_loop(&self) { loop { // If the node is not connected to any peers, then skip this iteration. if self.router.number_of_connected_peers() == 0 { - trace!("Skipping an iteration of the coinbase puzzle (no connected peers)"); + debug!("Skipping an iteration of the puzzle (no connected peers)"); tokio::time::sleep(Duration::from_secs(N::ANCHOR_TIME as u64)).await; continue; } - // If the number of instances of the coinbase puzzle exceeds the maximum, then skip this iteration. + // If the number of instances of the puzzle exceeds the maximum, then skip this iteration. if self.num_puzzle_instances() > self.max_puzzle_instances { // Sleep for a brief period of time. tokio::time::sleep(Duration::from_millis(500)).await; continue; } - // Read the latest epoch challenge. - let latest_epoch_challenge = self.latest_epoch_challenge.read().clone(); + // Read the latest epoch hash. + let latest_epoch_hash = *self.latest_epoch_hash.read(); // Read the latest state. let latest_state = self .latest_block_header .read() .as_ref() .map(|header| (header.coinbase_target(), header.proof_target())); - // If the latest epoch challenge and latest state exists, then proceed to generate a prover solution. - if let (Some(challenge), Some((coinbase_target, proof_target))) = (latest_epoch_challenge, latest_state) { - // Execute the coinbase puzzle. + // If the latest epoch hash and latest state exist, then proceed to generate a solution. + if let (Some(epoch_hash), Some((coinbase_target, proof_target))) = (latest_epoch_hash, latest_state) { + // Execute the puzzle.
let prover = self.clone(); let result = tokio::task::spawn_blocking(move || { - prover.coinbase_puzzle_iteration(&challenge, coinbase_target, proof_target, &mut OsRng) + prover.puzzle_iteration(epoch_hash, coinbase_target, proof_target, &mut OsRng) }) .await; // If the prover found a solution, then broadcast it. if let Ok(Some((solution_target, solution))) = result { - info!("Found a Solution '{}' (Proof Target {solution_target})", solution.commitment()); - // Broadcast the prover solution. - self.broadcast_prover_solution(solution); + info!("Found a Solution '{}' (Proof Target {solution_target})", solution.id()); + // Broadcast the solution. + self.broadcast_solution(solution); } } else { // Otherwise, sleep for a brief period of time, to await for puzzle state. @@ -223,38 +224,34 @@ // If the Ctrl-C handler registered the signal, stop the prover. if self.shutdown.load(Ordering::Relaxed) { - trace!("Shutting down the coinbase puzzle..."); + debug!("Shutting down the puzzle..."); break; } } } - /// Performs one iteration of the coinbase puzzle. - fn coinbase_puzzle_iteration( + /// Performs one iteration of the puzzle. + fn puzzle_iteration( &self, - epoch_challenge: &EpochChallenge, + epoch_hash: N::BlockHash, coinbase_target: u64, proof_target: u64, rng: &mut R, - ) -> Option<(u64, ProverSolution)> { + ) -> Option<(u64, Solution)> { // Increment the puzzle instances. self.increment_puzzle_instances(); - trace!( - "Proving 'CoinbasePuzzle' {}", - format!( - "(Epoch {}, Coinbase Target {coinbase_target}, Proof Target {proof_target})", - epoch_challenge.epoch_number(), - ) - .dimmed() + debug!( + "Proving 'Puzzle' for Epoch '{}' {}", + fmt_id(epoch_hash), + format!("(Coinbase Target {coinbase_target}, Proof Target {proof_target})").dimmed() ); - // Compute the prover solution. - let result = self - .coinbase_puzzle - .prove(epoch_challenge, self.address(), rng.gen(), Some(proof_target)) - .ok() - .and_then(|solution| solution.to_target().ok().map(|solution_target| (solution_target, solution))); + // Compute the solution. + let result = + self.puzzle.prove(epoch_hash, self.address(), rng.gen(), Some(proof_target)).ok().and_then(|solution| { + self.puzzle.get_proof_target(&solution).ok().map(|solution_target| (solution_target, solution)) + }); // Decrement the puzzle instances. self.decrement_puzzle_instances(); @@ -262,12 +259,12 @@ impl> Prover { result } - /// Broadcasts the prover solution to the network. - fn broadcast_prover_solution(&self, prover_solution: ProverSolution) { + /// Broadcasts the solution to the network. + fn broadcast_solution(&self, solution: Solution) { // Prepare the unconfirmed solution message. let message = Message::UnconfirmedSolution(UnconfirmedSolution { - solution_id: prover_solution.commitment(), - solution: Data::Object(prover_solution), + solution_id: solution.id(), + solution: Data::Object(solution), }); // Propagate the "UnconfirmedSolution". self.propagate(message, &[]); diff --git a/node/src/prover/router.rs b/node/src/prover/router.rs index f12a876166..46935e2c5b 100644 --- a/node/src/prover/router.rs +++ b/node/src/prover/router.rs @@ -117,13 +117,13 @@ impl> Reading for Prover { impl> Routing for Prover {} impl> Heartbeat for Prover { - /// This function updates the coinbase puzzle if network has updated. + /// This function updates the puzzle if the network has updated. fn handle_puzzle_request(&self) { // Find the sync peers. if let Some((sync_peers, _)) = self.sync.find_sync_peers() { // Choose the peer with the highest block height.
if let Some((peer_ip, _)) = sync_peers.into_iter().max_by_key(|(_, height)| *height) { - // Request the coinbase puzzle from the peer. + // Request the puzzle from the peer. Outbound::send(self, peer_ip, Message::PuzzleRequest(PuzzleRequest)); } } @@ -135,6 +135,16 @@ impl> Outbound for Prover { fn router(&self) -> &Router { &self.router } + + /// Returns `true` if the node is synced up to the latest block (within the given tolerance). + fn is_block_synced(&self) -> bool { + true + } + + /// Returns the number of blocks this node is behind the greatest peer height. + fn num_blocks_behind(&self) -> u32 { + 0 + } } #[async_trait] @@ -192,25 +202,23 @@ impl> Inbound for Prover { false } - /// Saves the latest epoch challenge and latest block header in the node. - fn puzzle_response(&self, peer_ip: SocketAddr, epoch_challenge: EpochChallenge, header: Header) -> bool { - // Retrieve the epoch number. - let epoch_number = epoch_challenge.epoch_number(); + /// Saves the latest epoch hash and latest block header in the node. + fn puzzle_response(&self, peer_ip: SocketAddr, epoch_hash: N::BlockHash, header: Header) -> bool { // Retrieve the block height. let block_height = header.height(); info!( - "Coinbase Puzzle (Epoch {epoch_number}, Block {block_height}, Coinbase Target {}, Proof Target {})", + "Puzzle (Block {block_height}, Coinbase Target {}, Proof Target {})", header.coinbase_target(), header.proof_target() ); - // Save the latest epoch challenge in the node. - self.latest_epoch_challenge.write().replace(Arc::new(epoch_challenge)); + // Save the latest epoch hash in the node. + self.latest_epoch_hash.write().replace(epoch_hash); // Save the latest block header in the node. self.latest_block_header.write().replace(header); - trace!("Received 'PuzzleResponse' from '{peer_ip}' (Epoch {epoch_number}, Block {block_height})"); + trace!("Received 'PuzzleResponse' from '{peer_ip}' (Block {block_height})"); true } @@ -219,32 +227,37 @@ impl> Inbound for Prover { &self, peer_ip: SocketAddr, serialized: UnconfirmedSolution, - solution: ProverSolution, + solution: Solution, ) -> bool { - // Retrieve the latest epoch challenge. - let epoch_challenge = self.latest_epoch_challenge.read().clone(); + // Retrieve the latest epoch hash. + let epoch_hash = *self.latest_epoch_hash.read(); // Retrieve the latest proof target. let proof_target = self.latest_block_header.read().as_ref().map(|header| header.proof_target()); - if let (Some(epoch_challenge), Some(proof_target)) = (epoch_challenge, proof_target) { - // Ensure that the prover solution is valid for the given epoch. - let coinbase_puzzle = self.coinbase_puzzle.clone(); - let is_valid = tokio::task::spawn_blocking(move || { - solution.verify(coinbase_puzzle.coinbase_verifying_key(), &epoch_challenge, proof_target) - }) - .await; + if let (Some(epoch_hash), Some(proof_target)) = (epoch_hash, proof_target) { + // Ensure that the solution is valid for the given epoch. + let puzzle = self.puzzle.clone(); + let is_valid = + tokio::task::spawn_blocking(move || puzzle.check_solution(&solution, epoch_hash, proof_target)).await; match is_valid { // If the solution is valid, propagate the `UnconfirmedSolution`. - Ok(Ok(true)) => { + Ok(Ok(())) => { let message = Message::UnconfirmedSolution(serialized); // Propagate the "UnconfirmedSolution". 
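An aside on the verification shown above: `check_solution` returns `Result<()>` where the old `verify` returned `Result<bool>`, so the spawned task yields a nested result — the outer layer carries `spawn_blocking` join failures, the inner one the puzzle check itself. A standalone sketch of the same pattern, with type paths assumed from the imports in this diff:

use snarkvm::prelude::{puzzle::{Puzzle, Solution}, Network};

async fn verify_off_thread<N: Network>(
    puzzle: Puzzle<N>,
    solution: Solution<N>,
    epoch_hash: N::BlockHash,
    proof_target: u64,
) -> bool {
    // Offload the CPU-bound check so the async executor stays responsive.
    let result = tokio::task::spawn_blocking(move || puzzle.check_solution(&solution, epoch_hash, proof_target)).await;
    match result {
        Ok(Ok(())) => true,  // The solution is valid for this epoch and proof target.
        Ok(Err(_)) => false, // The solution failed the puzzle check.
        Err(_) => false,     // The blocking task panicked or was cancelled.
    }
}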
self.propagate(message, &[peer_ip]); } - Ok(Ok(false)) | Ok(Err(_)) => { - trace!("Invalid prover solution '{}' for the proof target.", solution.commitment()) + Ok(Err(_)) => { + trace!("Invalid solution '{}' for the proof target.", solution.id()) + } + // If an error occurs after the first 10 blocks of the epoch, log it as a warning; otherwise, ignore it. + Err(error) => { + if let Some(height) = self.latest_block_header.read().as_ref().map(|header| header.height()) { + if height % N::NUM_BLOCKS_PER_EPOCH > 10 { + warn!("Failed to verify the solution - {error}") + } + } } - Err(error) => warn!("Failed to verify the prover solution: {error}"), } } true diff --git a/node/src/traits.rs b/node/src/traits.rs index 12947ca3f2..e3cd8ad7ed 100644 --- a/node/src/traits.rs +++ b/node/src/traits.rs @@ -17,6 +17,8 @@ use snarkvm::prelude::{Address, Network, PrivateKey, ViewKey}; use once_cell::sync::OnceCell; use std::{ + future::Future, + io, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -53,16 +55,47 @@ pub trait NodeInterface: Routing { /// Handles OS signals for the node to intercept and perform a clean shutdown. /// The optional `shutdown_flag` flag can be used to cleanly terminate the syncing process. - /// Note: Only Ctrl-C is supported; it should work on both Unix-family systems and Windows. fn handle_signals(shutdown_flag: Arc) -> Arc> { // In order for the signal handler to be started as early as possible, a reference to the node needs // to be passed to it at a later time. let node: Arc> = Default::default(); + #[cfg(target_family = "unix")] + fn signal_listener() -> impl Future> { + use tokio::signal::unix::{signal, SignalKind}; + + // Handle SIGINT, SIGTERM, SIGQUIT, and SIGHUP. + let mut s_int = signal(SignalKind::interrupt()).unwrap(); + let mut s_term = signal(SignalKind::terminate()).unwrap(); + let mut s_quit = signal(SignalKind::quit()).unwrap(); + let mut s_hup = signal(SignalKind::hangup()).unwrap(); + + // Return when any of the signals above is received. + async move { + tokio::select!( + _ = s_int.recv() => (), + _ = s_term.recv() => (), + _ = s_quit.recv() => (), + _ = s_hup.recv() => (), + ); + Ok(()) + } + } + #[cfg(not(target_family = "unix"))] + fn signal_listener() -> impl Future> { + tokio::signal::ctrl_c() + } + let node_clone = node.clone(); tokio::task::spawn(async move { - match tokio::signal::ctrl_c().await { + match signal_listener().await { Ok(()) => { + warn!("=========================================================================================="); + warn!("⚠️ Attention - Starting the graceful shutdown procedure (ETA: 30 seconds)..."); + warn!("⚠️ Attention - To avoid DATA CORRUPTION, do NOT interrupt snarkOS (or press Ctrl+C again)"); + warn!("⚠️ Attention - Please wait until the shutdown gracefully completes (ETA: 30 seconds)"); + warn!("=========================================================================================="); + match node_clone.get() { // If the node is already initialized, then shut it down.
Some(node) => node.shut_down().await, diff --git a/node/src/validator/mod.rs b/node/src/validator/mod.rs index 3d9846b27f..3b618bcc67 100644 --- a/node/src/validator/mod.rs +++ b/node/src/validator/mod.rs @@ -16,7 +16,7 @@ mod router; use crate::traits::NodeInterface; use snarkos_account::Account; -use snarkos_node_bft::{helpers::init_primary_channels, ledger_service::CoreLedgerService}; +use snarkos_node_bft::{helpers::init_primary_channels, ledger_service::CoreLedgerService, spawn_blocking}; use snarkos_node_consensus::Consensus; use snarkos_node_rest::Rest; use snarkos_node_router::{ @@ -34,7 +34,7 @@ use snarkos_node_tcp::{ }; use snarkvm::prelude::{ block::{Block, Header}, - coinbase::ProverSolution, + puzzle::Solution, store::ConsensusStorage, Ledger, Network, @@ -83,17 +83,16 @@ impl> Validator { genesis: Block, cdn: Option, storage_mode: StorageMode, + allow_external_peers: bool, + dev_txs: bool, + shutdown: Arc, ) -> Result { - // Prepare the shutdown flag. - let shutdown: Arc = Default::default(); - // Initialize the signal handler. let signal_node = Self::handle_signals(shutdown.clone()); // Initialize the ledger. let ledger = Ledger::load(genesis, storage_mode.clone())?; - // TODO: Remove me after Phase 3. - let ledger = crate::phase_3_reset(ledger, storage_mode.clone())?; + // Initialize the CDN. if let Some(base_url) = cdn { // Sync the ledger with the CDN. @@ -125,6 +124,7 @@ impl> Validator { account, trusted_peers, Self::MAXIMUM_NUMBER_OF_PEERS as u16, + allow_external_peers, matches!(storage_mode, StorageMode::Development(_)), ) .await?; @@ -140,7 +140,7 @@ impl> Validator { shutdown, }; // Initialize the transaction pool. - node.initialize_transaction_pool(storage_mode)?; + node.initialize_transaction_pool(storage_mode, dev_txs)?; // Initialize the REST server. if let Some(rest_ip) = rest_ip { @@ -340,7 +340,7 @@ impl> Validator { // } /// Initialize the transaction pool. - fn initialize_transaction_pool(&self, storage_mode: StorageMode) -> Result<()> { + fn initialize_transaction_pool(&self, storage_mode: StorageMode, dev_txs: bool) -> Result<()> { use snarkvm::console::{ program::{Identifier, Literal, ProgramID, Value}, types::U64, @@ -354,25 +354,13 @@ impl> Validator { match storage_mode { // If the node is running in development mode, only generate if you are allowed. StorageMode::Development(id) => { - // If the node is not the first node, do not start the loop. - if id != 0 { - return Ok(()); - } - } - _ => { - // Retrieve the genesis committee. - let Ok(Some(committee)) = self.ledger.get_committee_for_round(0) else { - // If the genesis committee is not available, do not start the loop. - return Ok(()); - }; - // Retrieve the first member. - // Note: It is guaranteed that the committee has at least one member. - let first_member = committee.members().first().unwrap().0; - // If the node is not the first member, do not start the loop. - if self.address() != *first_member { + // If the node is not the first node, or if we should not create dev traffic, do not start the loop. + if id != 0 || !dev_txs { return Ok(()); } } + // If the node is not running in development mode, do not generate dev traffic. + _ => return Ok(()), } let self_ = self.clone(); @@ -387,15 +375,16 @@ impl> Validator { // Prepare the inputs. let inputs = [Value::from(Literal::Address(self_.address())), Value::from(Literal::U64(U64::new(1)))]; // Execute the transaction. 
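The hunk that follows wraps this execution in the `spawn_blocking!` macro imported from `snarkos_node_bft` earlier in this file's diff, keeping the long-running VM call off the async executor. A rough plain-tokio equivalent, under the assumption that the macro wraps its expression in `tokio::task::spawn_blocking` and flattens the join error (an assumption about the macro's shape, not its actual definition):

use anyhow::{anyhow, Result};

// `heavy_computation` stands in for the closed-over `ledger.vm().execute(...)` call.
async fn run_blocking<T: Send + 'static>(
    heavy_computation: impl FnOnce() -> Result<T> + Send + 'static,
) -> Result<T> {
    match tokio::task::spawn_blocking(heavy_computation).await {
        Ok(result) => result,                            // The inner execution result.
        Err(join_error) => Err(anyhow!("{join_error}")), // The task panicked or was aborted.
    }
}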
- let transaction = match self_.ledger.vm().execute( - self_.private_key(), + let self__ = self_.clone(); + let transaction = match spawn_blocking!(self__.ledger.vm().execute( + self__.private_key(), locator, inputs.into_iter(), None, 10_000, None, &mut rand::thread_rng(), - ) { + )) { Ok(transaction) => transaction, Err(error) => { error!("Transaction pool encountered an execution error - {error}"); @@ -454,7 +443,7 @@ mod tests { use super::*; use snarkvm::prelude::{ store::{helpers::memory::ConsensusMemory, ConsensusStore}, - Testnet3, + MainnetV0, VM, }; @@ -463,16 +452,17 @@ mod tests { use rand_chacha::ChaChaRng; use std::str::FromStr; - type CurrentNetwork = Testnet3; + type CurrentNetwork = MainnetV0; /// Use `RUST_MIN_STACK=67108864 cargo test --release profiler --features timer` to run this test. #[ignore] #[tokio::test] async fn test_profiler() -> Result<()> { // Specify the node attributes. - let node = SocketAddr::from_str("0.0.0.0:4133").unwrap(); - let rest = SocketAddr::from_str("0.0.0.0:3033").unwrap(); + let node = SocketAddr::from_str("0.0.0.0:4130").unwrap(); + let rest = SocketAddr::from_str("0.0.0.0:3030").unwrap(); let storage_mode = StorageMode::Development(0); + let dev_txs = true; // Initialize an (insecure) fixed RNG. let mut rng = ChaChaRng::seed_from_u64(1234567890u64); @@ -496,6 +486,9 @@ mod tests { genesis, None, storage_mode, + false, + dev_txs, + Default::default(), ) .await .unwrap(); diff --git a/node/src/validator/router.rs b/node/src/validator/router.rs index 3a01e6046e..0ac3743ae1 100644 --- a/node/src/validator/router.rs +++ b/node/src/validator/router.rs @@ -27,7 +27,7 @@ use snarkos_node_router::messages::{ use snarkos_node_tcp::{Connection, ConnectionSide, Tcp}; use snarkvm::{ ledger::narwhal::Data, - prelude::{block::Transaction, coinbase::EpochChallenge, error, Network}, + prelude::{block::Transaction, error, Network}, }; use std::{io, net::SocketAddr, time::Duration}; @@ -137,6 +137,16 @@ impl> Outbound for Validator { fn router(&self) -> &Router { &self.router } + + /// Returns `true` if the node is synced up to the latest block (within the given tolerance). + fn is_block_synced(&self) -> bool { + self.sync.is_block_synced() + } + + /// Returns the number of blocks this node is behind the greatest peer height. + fn num_blocks_behind(&self) -> u32 { + self.sync.num_blocks_behind() + } } #[async_trait] @@ -209,11 +219,11 @@ impl> Inbound for Validator { true } - /// Retrieves the latest epoch challenge and latest block header, and returns the puzzle response to the peer. + /// Retrieves the latest epoch hash and latest block header, and returns the puzzle response to the peer. fn puzzle_request(&self, peer_ip: SocketAddr) -> bool { - // Retrieve the latest epoch challenge. - let epoch_challenge = match self.ledger.latest_epoch_challenge() { - Ok(epoch_challenge) => epoch_challenge, + // Retrieve the latest epoch hash. + let epoch_hash = match self.ledger.latest_epoch_hash() { + Ok(epoch_hash) => epoch_hash, Err(error) => { error!("Failed to prepare a puzzle request for '{peer_ip}': {error}"); return false; @@ -222,12 +232,12 @@ impl> Inbound for Validator { // Retrieve the latest block header. let block_header = Data::Object(self.ledger.latest_header()); // Send the `PuzzleResponse` message to the peer. 
- Outbound::send(self, peer_ip, Message::PuzzleResponse(PuzzleResponse { epoch_challenge, block_header })); + Outbound::send(self, peer_ip, Message::PuzzleResponse(PuzzleResponse { epoch_hash, block_header })); true } /// Disconnects on receipt of a `PuzzleResponse` message. - fn puzzle_response(&self, peer_ip: SocketAddr, _epoch_challenge: EpochChallenge, _header: Header) -> bool { + fn puzzle_response(&self, peer_ip: SocketAddr, _epoch_hash: N::BlockHash, _header: Header) -> bool { debug!("Disconnecting '{peer_ip}' for the following reason - {:?}", DisconnectReason::ProtocolViolation); false } @@ -237,7 +247,7 @@ impl> Inbound for Validator { &self, peer_ip: SocketAddr, serialized: UnconfirmedSolution, - solution: ProverSolution, + solution: Solution, ) -> bool { // Add the unconfirmed solution to the memory pool. if let Err(error) = self.consensus.add_unconfirmed_solution(solution).await { diff --git a/node/sync/Cargo.toml b/node/sync/Cargo.toml index a5f13d43d0..f2b309e43f 100644 --- a/node/sync/Cargo.toml +++ b/node/sync/Cargo.toml @@ -18,6 +18,7 @@ edition = "2021" [features] default = [ ] +metrics = [ "dep:metrics" ] test = [ "snarkos-node-sync-locators/test" ] [dependencies.anyhow] @@ -47,6 +48,16 @@ path = "../bft/ledger-service" version = "=2.2.7" features = [ "ledger-write" ] +[dependencies.metrics] +package = "snarkos-node-metrics" +path = "../metrics" +version = "=2.2.7" +optional = true + +[dependencies.snarkos-node-router] +path = "../router" +version = "=2.2.7" + [dependencies.snarkos-node-sync-communication-service] path = "communication-service" version = "=2.2.7" diff --git a/node/sync/locators/src/block_locators.rs b/node/sync/locators/src/block_locators.rs index de85c498e9..38bfe4c479 100644 --- a/node/sync/locators/src/block_locators.rs +++ b/node/sync/locators/src/block_locators.rs @@ -311,7 +311,7 @@ pub mod test_helpers { use super::*; use snarkvm::prelude::Field; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; /// Simulates a block locator at the given height. pub fn sample_block_locators(height: u32) -> BlockLocators { @@ -392,7 +392,7 @@ mod tests { use core::ops::Range; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; /// Simulates block locators for a ledger within the given `heights` range. fn check_is_valid(checkpoints: IndexMap::BlockHash>, heights: Range) { diff --git a/node/sync/src/block_sync.rs b/node/sync/src/block_sync.rs index aadb6873a8..525962ed0c 100644 --- a/node/sync/src/block_sync.rs +++ b/node/sync/src/block_sync.rs @@ -13,10 +13,11 @@ // limitations under the License. 
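Before the `block_sync` changes that follow: the sync module now tracks both whether the node is synced and how far behind it is. The rule, mirrored from the `update_is_block_synced` hunk further below, is that a node counts as synced when it trails the greatest peer height by at most `MAX_BLOCKS_BEHIND` (tightened from 2 to 1 in this diff). A minimal sketch of that computation:

// Returns (num_blocks_behind, is_synced), mirroring `update_is_block_synced` below.
fn compute_sync_status(greatest_peer_height: u32, canon_height: u32, max_blocks_behind: u32) -> (u32, bool) {
    // `saturating_sub` clamps to zero when this node is at or ahead of the best peer.
    let num_blocks_behind = greatest_peer_height.saturating_sub(canon_height);
    (num_blocks_behind, num_blocks_behind <= max_blocks_behind)
}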
use crate::{ - helpers::{PeerPair, SyncRequest}, + helpers::{PeerPair, PrepareSyncRequest, SyncRequest}, locators::BlockLocators, }; use snarkos_node_bft_ledger_service::LedgerService; +use snarkos_node_router::messages::DataBlocks; use snarkos_node_sync_communication_service::CommunicationService; use snarkos_node_sync_locators::{CHECKPOINT_INTERVAL, NUM_RECENT_BLOCKS}; use snarkvm::prelude::{block::Block, Network}; @@ -30,26 +31,28 @@ use std::{ collections::BTreeMap, net::{IpAddr, Ipv4Addr, SocketAddr}, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU32, Ordering}, Arc, }, time::Instant, }; +#[cfg(not(test))] +pub const REDUNDANCY_FACTOR: usize = 1; +#[cfg(test)] pub const REDUNDANCY_FACTOR: usize = 3; -const EXTRA_REDUNDANCY_FACTOR: usize = REDUNDANCY_FACTOR * 2; +const EXTRA_REDUNDANCY_FACTOR: usize = REDUNDANCY_FACTOR * 3; const NUM_SYNC_CANDIDATE_PEERS: usize = REDUNDANCY_FACTOR * 5; -const BLOCK_REQUEST_TIMEOUT_IN_SECS: u64 = 60; // 60 seconds +const BLOCK_REQUEST_TIMEOUT_IN_SECS: u64 = 600; // 600 seconds const MAX_BLOCK_REQUESTS: usize = 50; // 50 requests -const MAX_BLOCK_REQUEST_TIMEOUTS: usize = 5; // 5 timeouts /// The maximum number of blocks tolerated before the primary is considered behind its peers. -pub const MAX_BLOCKS_BEHIND: u32 = 2; // blocks +pub const MAX_BLOCKS_BEHIND: u32 = 1; // blocks /// This is a dummy IP address that is used to represent the local node. /// Note: This here does not need to be a real IP address, but it must be unique/distinct from all other connections. -const DUMMY_SELF_IP: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0); +pub const DUMMY_SELF_IP: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0); #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum BlockSyncMode { @@ -101,11 +104,10 @@ pub struct BlockSync { /// The map of block height to the timestamp of the last time the block was requested. /// This map is used to determine which requests to remove if they have been pending for too long. request_timestamps: Arc>>, - /// The map of (timed out) peer IPs to their request timestamps. - /// This map is used to determine which peers to remove if they have timed out too many times. - request_timeouts: Arc>>>, /// The boolean indicator of whether the node is synced up to the latest block (within the given tolerance). is_block_synced: Arc, + /// The number of blocks the node is behind the greatest peer height. + num_blocks_behind: Arc, /// The lock to guarantee advance_with_sync_blocks() is called only once at a time. advance_with_sync_blocks_lock: Arc>, } @@ -121,8 +123,8 @@ impl BlockSync { requests: Default::default(), responses: Default::default(), request_timestamps: Default::default(), - request_timeouts: Default::default(), is_block_synced: Default::default(), + num_blocks_behind: Default::default(), advance_with_sync_blocks_lock: Default::default(), } } @@ -138,6 +140,12 @@ impl BlockSync { pub fn is_block_synced(&self) -> bool { self.is_block_synced.load(Ordering::SeqCst) } + + /// Returns the number of blocks the node is behind the greatest peer height. + #[inline] + pub fn num_blocks_behind(&self) -> u32 { + self.num_blocks_behind.load(Ordering::SeqCst) + } } #[allow(dead_code)] @@ -215,7 +223,7 @@ impl BlockSync { pub async fn try_block_sync(&self, communication: &C) { // Prepare the block requests, if any. // In the process, we update the state of `is_block_synced` for the sync module.
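The rewritten `try_block_sync` below no longer issues one request per height: the prepared requests are chunked by `DataBlocks::MAXIMUM_NUMBER_OF_BLOCKS`, and each chunk becomes a single `BlockRequest(start..end)` sent to one sampled set of sync IPs. A sketch of the range arithmetic, with a hypothetical per-message limit (the real limit comes from `DataBlocks`):

// Splits [start, end) into consecutive ranges of at most `max_per_message` heights.
fn chunk_height_ranges(start: u32, end: u32, max_per_message: u32) -> Vec<(u32, u32)> {
    let mut ranges = Vec::new();
    let mut height = start;
    while height < end {
        let chunk_end = height.saturating_add(max_per_message).min(end);
        ranges.push((height, chunk_end));
        height = chunk_end;
    }
    ranges
}

For example, `chunk_height_ranges(1, 11, 4)` yields `[(1, 5), (5, 9), (9, 11)]`.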
- let block_requests = self.prepare_block_requests(); + let (block_requests, sync_peers) = self.prepare_block_requests(); trace!("Prepared {} block requests", block_requests.len()); // If there are no block requests, but there are pending block responses in the sync pool, @@ -233,18 +241,41 @@ } // Process the block requests. - 'outer: for (height, (hash, previous_hash, sync_ips)) in block_requests { - // Insert the block request into the sync pool. - if let Err(error) = self.insert_block_request(height, (hash, previous_hash, sync_ips.clone())) { - warn!("Block sync failed - {error}"); - // Break out of the loop. - break 'outer; + 'outer: for requests in block_requests.chunks(DataBlocks::::MAXIMUM_NUMBER_OF_BLOCKS as usize) { + // Retrieve the starting height and the maximum number of sync IPs. + let (start_height, max_num_sync_ips) = match requests.first() { + Some((height, (_, _, max_num_sync_ips))) => (*height, *max_num_sync_ips), + None => { + warn!("Block sync failed - no block requests"); + break 'outer; + } + }; + + // Use a randomly sampled subset of the sync IPs. + let sync_ips: IndexSet<_> = sync_peers + .keys() + .copied() + .choose_multiple(&mut rand::thread_rng(), max_num_sync_ips) + .into_iter() + .collect(); + + // Calculate the end height. + let end_height = start_height.saturating_add(requests.len() as u32); + + // Insert the chunk of block requests. + for (height, (hash, previous_hash, _)) in requests.iter() { + // Insert the block request into the sync pool, using the sync IPs sampled for this chunk. + if let Err(error) = self.insert_block_request(*height, (*hash, *previous_hash, sync_ips.clone())) { + warn!("Block sync failed - {error}"); + // Break out of the loop. + break 'outer; + } } /* Send the block request to the peers */ // Construct the message. - let message = C::prepare_block_request(height, height + 1); + let message = C::prepare_block_request(start_height, end_height); // Send the message to the peers. for sync_ip in sync_ips { let sender = communication.send(sync_ip, message.clone()).await; @@ -252,7 +283,9 @@ if sender.is_none() { warn!("Failed to send block request to peer '{sync_ip}'"); // Remove the entire block request from the sync pool. - self.remove_block_request(height); + for height in start_height..end_height { + self.remove_block_request(height); + } // Break out of the loop. break 'outer; } @@ -356,6 +389,9 @@ impl BlockSync { self.locators.write().insert(peer_ip, locators.clone()); // Compute the common ancestor with this node. + // Attention: Please do not optimize this loop, as it performs fork-detection. In addition, + // by iterating upwards, it also early-terminates malicious block locators at the *first* point + // of bifurcation in their ledger history, which is a critical safety guarantee provided here. let mut ancestor = 0; for (height, hash) in locators.clone().into_iter() { if let Ok(canon_hash) = self.canon.get_block_hash(height) { @@ -399,14 +435,13 @@ impl BlockSync { self.locators.write().swap_remove(peer_ip); // Remove all block requests to the peer. self.remove_block_requests_to_peer(peer_ip); - // Remove the timeouts for the peer. - self.request_timeouts.write().swap_remove(peer_ip); } } impl BlockSync { - /// Returns a list of block requests, if the node needs to sync. - fn prepare_block_requests(&self) -> Vec<(u32, SyncRequest)> { + /// Returns a list of block requests and the sync peers, if the node needs to sync.
+ #[allow(clippy::type_complexity)] + fn prepare_block_requests(&self) -> (Vec<(u32, PrepareSyncRequest)>, IndexMap>) { // Remove timed out block requests. self.remove_timed_out_block_requests(); // Prepare the block requests. @@ -416,12 +451,12 @@ impl BlockSync { // Update the state of `is_block_synced` for the sync module. self.update_is_block_synced(greatest_peer_height, MAX_BLOCKS_BEHIND); // Return the list of block requests. - self.construct_requests(sync_peers, min_common_ancestor, &mut rand::thread_rng()) + (self.construct_requests(&sync_peers, min_common_ancestor), sync_peers) } else { // Update the state of `is_block_synced` for the sync module. self.update_is_block_synced(0, MAX_BLOCKS_BEHIND); // Return an empty list of block requests. - Vec::new() + (Default::default(), Default::default()) } } @@ -436,8 +471,13 @@ impl BlockSync { let num_blocks_behind = greatest_peer_height.saturating_sub(canon_height); // Determine if the primary is synced. let is_synced = num_blocks_behind <= max_blocks_behind; + // Update the num blocks behind. + self.num_blocks_behind.store(num_blocks_behind, Ordering::SeqCst); // Update the sync status. self.is_block_synced.store(is_synced, Ordering::SeqCst); + // Update the `IS_SYNCED` metric. + #[cfg(feature = "metrics")] + metrics::gauge(metrics::bft::IS_SYNCED, is_synced); } /// Inserts a block request for the given height. @@ -625,8 +665,6 @@ impl BlockSync { // Retrieve the current time. let now = Instant::now(); - // Track each unique peer IP that has timed out. - let mut timeout_ips = IndexSet::new(); // Track the number of timed out block requests. let mut num_timed_out_block_requests = 0; @@ -643,10 +681,7 @@ impl BlockSync { // If the request has timed out, then remove it. if is_timeout { // Remove the request entry for the given height. - if let Some((_, _, sync_ips)) = requests.remove(height) { - // Add each sync IP to the timeout IPs. - timeout_ips.extend(sync_ips); - } + requests.remove(height); // Remove the response entry for the given height. responses.remove(height); // Increment the number of timed out block requests. @@ -656,16 +691,6 @@ impl BlockSync { !is_timeout }); - // If there are timeout IPs, then add them to the request timeouts map. - if !timeout_ips.is_empty() { - // Acquire the write lock on the request timeouts map. - let mut request_timeouts = self.request_timeouts.write(); - // Add each timeout IP to the request timeouts map. - for timeout_ip in timeout_ips { - request_timeouts.entry(timeout_ip).or_default().push(now); - } - } - num_timed_out_block_requests } @@ -674,21 +699,12 @@ impl BlockSync { // Retrieve the latest canon height. let latest_canon_height = self.canon.latest_block_height(); - // Compute the timeout frequency of each peer. - let timeouts = self - .request_timeouts - .read() - .iter() - .map(|(peer_ip, timestamps)| (*peer_ip, timestamps.len())) - .collect::>(); - // Pick a set of peers above the latest canon height, and include their locators. let candidate_locators: IndexMap<_, _> = self .locators .read() .iter() .filter(|(_, locators)| locators.latest_locator_height() > latest_canon_height) - .filter(|(ip, _)| timeouts.get(*ip).map(|count| *count < MAX_BLOCK_REQUEST_TIMEOUTS).unwrap_or(true)) .sorted_by(|(_, a), (_, b)| b.latest_locator_height().cmp(&a.latest_locator_height())) .take(NUM_SYNC_CANDIDATE_PEERS) .map(|(peer_ip, locators)| (*peer_ip, locators.clone())) @@ -749,39 +765,51 @@ impl BlockSync { return None; } + // Shuffle the sync peers prior to returning. 
This ensures the rest of the stack + does not rely on the order of the sync peers, and that the sync peers are not biased. + let sync_peers = shuffle_indexmap(sync_peers, &mut rand::thread_rng()); + Some((sync_peers, min_common_ancestor)) } /// Given the sync peers and their minimum common ancestor, return a list of block requests. - fn construct_requests( + fn construct_requests( &self, - sync_peers: IndexMap>, + sync_peers: &IndexMap>, min_common_ancestor: u32, - rng: &mut R, - ) -> Vec<(u32, SyncRequest)> { + ) -> Vec<(u32, PrepareSyncRequest)> { // Retrieve the latest canon height. let latest_canon_height = self.canon.latest_block_height(); // If the minimum common ancestor is at or below the latest canon height, then return early. if min_common_ancestor <= latest_canon_height { - return vec![]; + return Default::default(); } // Compute the start height for the block request. let start_height = latest_canon_height + 1; // Compute the end height for the block request. - let end_height = (min_common_ancestor + 1).min(start_height + MAX_BLOCK_REQUESTS as u32); + let max_blocks_to_request = MAX_BLOCK_REQUESTS as u32 * DataBlocks::::MAXIMUM_NUMBER_OF_BLOCKS as u32; + let end_height = (min_common_ancestor + 1).min(start_height + max_blocks_to_request); - let mut requests = Vec::with_capacity((start_height..end_height).len()); + // Construct the block hashes to request. + let mut request_hashes = IndexMap::with_capacity((start_height..end_height).len()); + // Track the largest number of sync IPs required for any block request in the sequence of requests. + let mut max_num_sync_ips = 1; for height in start_height..end_height { // Ensure the current height is not canonized or already requested. if self.check_block_request(height).is_err() { - continue; + // If an already-started sequence of block requests is interrupted, then stop early. + // Otherwise, keep scanning until the first new start height is found. + match request_hashes.is_empty() { + true => continue, + false => break, + } } // Construct the block request. - let (hash, previous_hash, num_sync_ips, is_honest) = construct_request(height, &sync_peers); + let (hash, previous_hash, num_sync_ips, is_honest) = construct_request(height, sync_peers); // Handle the dishonest case. if !is_honest { @@ -793,14 +821,18 @@ impl BlockSync { } } - // Pick the sync peers. - let sync_ips = sync_peers.keys().copied().choose_multiple(rng, num_sync_ips); + // Update the maximum number of sync IPs. + max_num_sync_ips = max_num_sync_ips.max(num_sync_ips); // Append the request. - requests.push((height, (hash, previous_hash, sync_ips.into_iter().collect()))); + request_hashes.insert(height, (hash, previous_hash)); } - requests + // Construct the requests, all carrying the same maximum number of sync IPs. + request_hashes + .into_iter() + .map(|(height, (hash, previous_hash))| (height, (hash, previous_hash, max_num_sync_ips))) + .collect() } } @@ -876,6 +908,18 @@ fn construct_request( (hash, previous_hash, num_sync_ips, is_honest) } +/// Shuffles a given `IndexMap` using the given random number generator.
+fn shuffle_indexmap(mut map: IndexMap, rng: &mut R) -> IndexMap +where + K: core::hash::Hash + Eq + Clone, + V: Clone, +{ + use rand::seq::SliceRandom; + let mut pairs: Vec<_> = map.drain(..).collect(); // Drain elements to a vector + pairs.shuffle(rng); // Shuffle the vector of tuples + pairs.into_iter().collect() // Collect back into an IndexMap +} + #[cfg(test)] mod tests { use super::*; @@ -887,11 +931,11 @@ mod tests { use snarkos_node_bft_ledger_service::MockLedgerService; use snarkvm::prelude::{Field, TestRng}; - use indexmap::indexset; + use indexmap::{indexset, IndexSet}; use snarkvm::ledger::committee::Committee; use std::net::{IpAddr, Ipv4Addr}; - type CurrentNetwork = snarkvm::prelude::Testnet3; + type CurrentNetwork = snarkvm::prelude::MainnetV0; /// Returns the peer IP for the sync pool. fn sample_peer_ip(id: u16) -> SocketAddr { @@ -921,6 +965,8 @@ mod tests { min_common_ancestor: u32, peers: IndexSet, ) { + let rng = &mut TestRng::default(); + // Check test assumptions are met. assert_eq!(sync.canon.latest_block_height(), 0, "This test assumes the sync pool is at genesis"); @@ -937,7 +983,7 @@ mod tests { }; // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, sync_peers) = sync.prepare_block_requests(); // If there are no peers, then there should be no requests. if peers.is_empty() { @@ -949,7 +995,10 @@ mod tests { let expected_num_requests = core::cmp::min(min_common_ancestor as usize, MAX_BLOCK_REQUESTS); assert_eq!(requests.len(), expected_num_requests); - for (idx, (height, (hash, previous_hash, sync_ips))) in requests.into_iter().enumerate() { + for (idx, (height, (hash, previous_hash, num_sync_ips))) in requests.into_iter().enumerate() { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); assert_eq!(height, 1 + idx as u32); assert_eq!(hash, Some((Field::::from_u32(height)).into())); assert_eq!(previous_hash, Some((Field::::from_u32(height - 1)).into())); @@ -1039,20 +1088,21 @@ mod tests { sync.update_peer_locators(peer_3, sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, _) = sync.prepare_block_requests(); assert_eq!(requests.len(), 10); // Check the requests. - for (idx, (height, (hash, previous_hash, sync_ips))) in requests.into_iter().enumerate() { + for (idx, (height, (hash, previous_hash, num_sync_ips))) in requests.into_iter().enumerate() { assert_eq!(height, 1 + idx as u32); assert_eq!(hash, Some((Field::::from_u32(height)).into())); assert_eq!(previous_hash, Some((Field::::from_u32(height - 1)).into())); - assert_eq!(sync_ips.len(), 1); // Only 1 needed since we have redundancy factor on this (recent locator) hash. + assert_eq!(num_sync_ips, 1); // Only 1 needed since we have redundancy factor on this (recent locator) hash. } } #[test] fn test_prepare_block_requests_with_leading_fork_at_10() { + let rng = &mut TestRng::default(); let sync = sample_sync_at_height(0); // Intuitively, peer 1's fork is at peer 2 and peer 3's height. @@ -1081,7 +1131,7 @@ mod tests { sync.update_peer_locators(peer_3, sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, _) = sync.prepare_block_requests(); assert_eq!(requests.len(), 0); // When there are NUM_REDUNDANCY+1 peers ahead, and 1 is on a fork, then there should be block requests. 
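A quick usage sketch of the `shuffle_indexmap` helper introduced above: the entries are preserved and only the iteration order is randomized, so nothing downstream can depend on peer ordering (this assumes the helper's `R` parameter is bounded by `rand::Rng`, as its call sites suggest):

use indexmap::IndexMap;

fn demo_shuffle() {
    let mut map = IndexMap::new();
    map.insert("a", 1);
    map.insert("b", 2);
    map.insert("c", 3);
    // The same three entries come back; only their order may change.
    let shuffled = shuffle_indexmap(map, &mut rand::thread_rng());
    assert_eq!(shuffled.len(), 3);
    assert_eq!(shuffled.get("b"), Some(&2));
}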
@@ -1091,11 +1141,14 @@ mod tests { sync.update_peer_locators(peer_4, sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, sync_peers) = sync.prepare_block_requests(); assert_eq!(requests.len(), 10); // Check the requests. - for (idx, (height, (hash, previous_hash, sync_ips))) in requests.into_iter().enumerate() { + for (idx, (height, (hash, previous_hash, num_sync_ips))) in requests.into_iter().enumerate() { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); assert_eq!(height, 1 + idx as u32); assert_eq!(hash, Some((Field::::from_u32(height)).into())); assert_eq!(previous_hash, Some((Field::::from_u32(height - 1)).into())); @@ -1106,6 +1159,7 @@ mod tests { #[test] fn test_prepare_block_requests_with_trailing_fork_at_9() { + let rng = &mut TestRng::default(); let sync = sample_sync_at_height(0); // Peer 1 and 2 diverge from peer 3 at block 10. We only sync when there are NUM_REDUNDANCY peers @@ -1125,7 +1179,7 @@ mod tests { sync.update_peer_locators(peer_3, sample_block_locators_with_fork(20, 10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, _) = sync.prepare_block_requests(); assert_eq!(requests.len(), 0); // When there are NUM_REDUNDANCY+1 peers ahead, and peer 3 is on a fork, then there should be block requests. @@ -1135,11 +1189,14 @@ mod tests { sync.update_peer_locators(peer_4, sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, sync_peers) = sync.prepare_block_requests(); assert_eq!(requests.len(), 10); // Check the requests. - for (idx, (height, (hash, previous_hash, sync_ips))) in requests.into_iter().enumerate() { + for (idx, (height, (hash, previous_hash, num_sync_ips))) in requests.into_iter().enumerate() { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); assert_eq!(height, 1 + idx as u32); assert_eq!(hash, Some((Field::::from_u32(height)).into())); assert_eq!(previous_hash, Some((Field::::from_u32(height - 1)).into())); @@ -1150,16 +1207,20 @@ mod tests { #[test] fn test_insert_block_requests() { + let rng = &mut TestRng::default(); let sync = sample_sync_at_height(0); // Add a peer. sync.update_peer_locators(sample_peer_ip(1), sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, sync_peers) = sync.prepare_block_requests(); assert_eq!(requests.len(), 10); - for (height, (hash, previous_hash, sync_ips)) in requests.clone() { + for (height, (hash, previous_hash, num_sync_ips)) in requests.clone() { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); // Insert the block request. sync.insert_block_request(height, (hash, previous_hash, sync_ips.clone())).unwrap(); // Check that the block requests were inserted. @@ -1167,13 +1228,19 @@ mod tests { assert!(sync.get_block_request_timestamp(height).is_some()); } - for (height, (hash, previous_hash, sync_ips)) in requests.clone() { + for (height, (hash, previous_hash, num_sync_ips)) in requests.clone() { + // Construct the sync IPs. 
+ let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); // Check that the block requests are still inserted. assert_eq!(sync.get_block_request(height), Some((hash, previous_hash, sync_ips))); assert!(sync.get_block_request_timestamp(height).is_some()); } - for (height, (hash, previous_hash, sync_ips)) in requests { + for (height, (hash, previous_hash, num_sync_ips)) in requests { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); // Ensure that the block requests cannot be inserted twice. sync.insert_block_request(height, (hash, previous_hash, sync_ips.clone())).unwrap_err(); // Check that the block requests are still inserted. @@ -1267,6 +1334,7 @@ mod tests { #[test] fn test_requests_insert_remove_insert() { + let rng = &mut TestRng::default(); let sync = sample_sync_at_height(0); // Add a peer. @@ -1274,10 +1342,13 @@ sync.update_peer_locators(peer_ip, sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, sync_peers) = sync.prepare_block_requests(); assert_eq!(requests.len(), 10); - for (height, (hash, previous_hash, sync_ips)) in requests.clone() { + for (height, (hash, previous_hash, num_sync_ips)) in requests.clone() { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); // Insert the block request. sync.insert_block_request(height, (hash, previous_hash, sync_ips.clone())).unwrap(); // Check that the block requests were inserted. @@ -1295,17 +1366,20 @@ } // As there is no peer, it should not be possible to prepare block requests. - let requests = sync.prepare_block_requests(); + let (requests, _) = sync.prepare_block_requests(); assert_eq!(requests.len(), 0); // Add the peer again. sync.update_peer_locators(peer_ip, sample_block_locators(10)).unwrap(); // Prepare the block requests. - let requests = sync.prepare_block_requests(); + let (requests, _) = sync.prepare_block_requests(); assert_eq!(requests.len(), 10); - for (height, (hash, previous_hash, sync_ips)) in requests { + for (height, (hash, previous_hash, num_sync_ips)) in requests { + // Construct the sync IPs. + let sync_ips: IndexSet<_> = + sync_peers.keys().choose_multiple(rng, num_sync_ips).into_iter().copied().collect(); // Insert the block request. sync.insert_block_request(height, (hash, previous_hash, sync_ips.clone())).unwrap(); // Check that the block requests were inserted. diff --git a/node/sync/src/helpers/mod.rs b/node/sync/src/helpers/mod.rs index 99f8b71e84..ce876fd12a 100644 --- a/node/sync/src/helpers/mod.rs +++ b/node/sync/src/helpers/mod.rs @@ -18,6 +18,9 @@ use core::hash::Hash; use indexmap::IndexSet; use std::net::SocketAddr; +/// A tuple of the block hash (optional), previous block hash (optional), and the number of sync IPs to request from. +pub type PrepareSyncRequest = (Option<::BlockHash>, Option<::BlockHash>, usize); +/// A tuple of the block hash (optional), previous block hash (optional), and sync IPs.
pub type SyncRequest = (Option<::BlockHash>, Option<::BlockHash>, IndexSet); diff --git a/node/tests/common/mod.rs b/node/tests/common/mod.rs index 71d48c4900..7e5f65f68a 100644 --- a/node/tests/common/mod.rs +++ b/node/tests/common/mod.rs @@ -18,7 +18,7 @@ pub mod test_peer; use std::{env, str::FromStr}; use snarkos_account::Account; -use snarkvm::prelude::{block::Block, FromBytes, Network, Testnet3 as CurrentNetwork}; +use snarkvm::prelude::{block::Block, FromBytes, MainnetV0 as CurrentNetwork, Network}; /// Returns a fixed account. pub fn sample_account() -> Account { diff --git a/node/tests/common/node.rs b/node/tests/common/node.rs index 7a9c0bc110..c88e35d969 100644 --- a/node/tests/common/node.rs +++ b/node/tests/common/node.rs @@ -15,7 +15,7 @@ use crate::common::test_peer::sample_genesis_block; use snarkos_account::Account; use snarkos_node::{Client, Prover, Validator}; -use snarkvm::prelude::{store::helpers::memory::ConsensusMemory, Testnet3 as CurrentNetwork}; +use snarkvm::prelude::{store::helpers::memory::ConsensusMemory, MainnetV0 as CurrentNetwork}; use aleo_std::StorageMode; use std::str::FromStr; @@ -30,6 +30,7 @@ pub async fn client() -> Client> sample_genesis_block(), None, // No CDN. StorageMode::Production, + Default::default(), ) .await .expect("couldn't create client instance") @@ -42,6 +43,7 @@ pub async fn prover() -> Prover> &[], sample_genesis_block(), StorageMode::Production, + Default::default(), ) .await .expect("couldn't create prover instance") @@ -59,6 +61,9 @@ pub async fn validator() -> Validator) -> Self { let peer = Self { node: Node::new(Config { - listener_ip: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), max_connections: 200, + listener_addr: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0)), ..Default::default() }), node_type, @@ -113,7 +113,6 @@ impl TestPeer { } } -#[async_trait::async_trait] impl Handshake for TestPeer { async fn perform_handshake(&self, mut conn: Connection) -> io::Result { let rng = &mut TestRng::default(); @@ -140,10 +139,13 @@ impl Handshake for TestPeer { let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr); // Sign the nonce. - let signature = self.account().sign_bytes(&peer_request.nonce.to_le_bytes(), rng).unwrap(); + let response_nonce: u64 = rng.gen(); + let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat(); + let signature = self.account().sign_bytes(&data, rng).unwrap(); // Send the challenge response. - let our_response = ChallengeResponse { genesis_header, signature: Data::Object(signature) }; + let our_response = + ChallengeResponse { genesis_header, signature: Data::Object(signature), nonce: response_nonce }; framed.send(Message::ChallengeResponse(our_response)).await?; } ConnectionSide::Responder => { @@ -151,10 +153,13 @@ impl Handshake for TestPeer { let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr); // Sign the nonce. - let signature = self.account().sign_bytes(&peer_request.nonce.to_le_bytes(), rng).unwrap(); + let response_nonce: u64 = rng.gen(); + let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat(); + let signature = self.account().sign_bytes(&data, rng).unwrap(); // Send our challenge bundle. 
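One note on the handshake change here and in the next hunk: the responder now signs the requester's nonce concatenated with a freshly sampled `response_nonce` (which is also carried in the `ChallengeResponse`), binding each signature to a single exchange. A sketch of the signed payload, assuming 8-byte little-endian nonces as in this diff:

// Builds the byte string passed to `sign_bytes` in the new handshake.
fn challenge_payload(request_nonce: u64, response_nonce: u64) -> Vec<u8> {
    // 16 bytes: the requester's nonce followed by the responder's fresh nonce.
    [request_nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat()
}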
- let our_response = ChallengeResponse { genesis_header, signature: Data::Object(signature) }; + let our_response = + ChallengeResponse { genesis_header, signature: Data::Object(signature), nonce: response_nonce }; framed.send(Message::ChallengeResponse(our_response)).await?; let our_request = ChallengeRequest::new(local_ip.port(), self.node_type(), self.address(), rng.gen()); framed.send(Message::ChallengeRequest(our_request)).await?; @@ -168,7 +173,6 @@ impl Handshake for TestPeer { } } -#[async_trait::async_trait] impl Writing for TestPeer { type Codec = MessageCodec; type Message = Message; @@ -178,7 +182,6 @@ impl Writing for TestPeer { } } -#[async_trait::async_trait] impl Reading for TestPeer { type Codec = MessageCodec; type Message = Message; @@ -192,7 +195,6 @@ impl Reading for TestPeer { } } -#[async_trait::async_trait] -impl Disconnect for TestPeer { - async fn handle_disconnect(&self, _peer_addr: SocketAddr) {} +impl OnDisconnect for TestPeer { + async fn on_disconnect(&self, _peer_addr: SocketAddr) {} } diff --git a/node/tests/handshake.rs b/node/tests/handshake.rs index e513ba705f..3c4c214269 100644 --- a/node/tests/handshake.rs +++ b/node/tests/handshake.rs @@ -21,7 +21,7 @@ use common::{node::*, test_peer::TestPeer}; use snarkos_node::{Client, Prover, Validator}; use snarkos_node_router::Outbound; use snarkos_node_tcp::P2P; -use snarkvm::prelude::{store::helpers::memory::ConsensusMemory, Testnet3 as CurrentNetwork}; +use snarkvm::prelude::{store::helpers::memory::ConsensusMemory, MainnetV0 as CurrentNetwork}; use pea2pea::Pea2Pea; diff --git a/rust-toolchain b/rust-toolchain index 80627411dc..32a6ce3c71 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -1.74.1 +1.76.0 diff --git a/snarkos/main.rs b/snarkos/main.rs index 64cef2f182..61a86949b8 100644 --- a/snarkos/main.rs +++ b/snarkos/main.rs @@ -17,10 +17,10 @@ use snarkos_cli::{commands::CLI, helpers::Updater}; use clap::Parser; use std::process::exit; -#[cfg(feature = "jemalloc")] +#[cfg(all(target_os = "linux", target_arch = "x86_64"))] use tikv_jemallocator::Jemalloc; -#[cfg(feature = "jemalloc")] +#[cfg(all(target_os = "linux", target_arch = "x86_64"))] #[global_allocator] static GLOBAL: Jemalloc = Jemalloc;
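A closing note on the allocator hunk above: jemalloc is now selected by compile target rather than by a Cargo feature, so x86_64 Linux builds always get it and other platforms keep the system allocator. A standalone sketch, assuming the `tikv-jemallocator` crate as used in this diff:

#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
use tikv_jemallocator::Jemalloc;

// On x86_64 Linux, route all heap allocations through jemalloc;
// on other targets this item compiles away and the system allocator is used.
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

fn main() {
    let buffer = vec![0u8; 1024];
    assert_eq!(buffer.len(), 1024);
}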