feat(nodes): encrypt all records before disk, decrypt on get
joshuef committed Jan 29, 2024
1 parent 3064a9a commit 23d4f49
Showing 3 changed files with 95 additions and 27 deletions.
16 changes: 16 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions sn_networking/Cargo.toml
@@ -39,6 +39,7 @@ tokio = { version = "1.32.0", features = ["io-util", "macros", "rt", "sync", "ti
tracing = { version = "~0.1.26" }
xor_name = "5.0.0"
backoff = { version = "0.4.0", features = ["tokio"] }
aes-gcm-siv = "0.11.1"

[dev-dependencies]
bls = { package = "blsttc", version = "8.0.1" }
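The new dependency is the `aes-gcm-siv` crate, an AES-256-GCM-SIV AEAD implementation. As a quick orientation, a minimal encrypt/decrypt round trip with that crate might look like the sketch below; the literal nonce and plaintext are illustrative only, and the record store derives its own per-record nonce as shown in the `record_store.rs` diff that follows.

```rust
use aes_gcm_siv::{
    aead::{Aead, KeyInit, OsRng},
    Aes256GcmSiv, Nonce,
};

fn main() {
    // Random 256-bit key; the node does this once at startup.
    let key = Aes256GcmSiv::generate_key(&mut OsRng);
    let cipher = Aes256GcmSiv::new(&key);

    // AES-GCM-SIV takes a 96-bit (12-byte) nonce.
    let nonce = Nonce::from_slice(b"unique nonce"); // 12 bytes, illustrative only
    let ciphertext = cipher
        .encrypt(nonce, b"record bytes".as_ref())
        .expect("encryption failed");
    let plaintext = cipher
        .decrypt(nonce, ciphertext.as_ref())
        .expect("decryption failed");
    assert_eq!(plaintext, b"record bytes".to_vec());
}
```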
105 changes: 78 additions & 27 deletions sn_networking/src/record_store.rs
@@ -9,6 +9,11 @@

use crate::target_arch::{spawn, Instant};
use crate::{cmd::SwarmCmd, event::NetworkEvent, send_swarm_cmd};
use aes_gcm_siv::aead::generic_array::GenericArray;
use aes_gcm_siv::{
aead::{Aead, KeyInit, OsRng},
Aes256GcmSiv,
};
use libp2p::{
identity::PeerId,
kad::{
@@ -18,6 +23,7 @@ use libp2p::{
};
#[cfg(feature = "open-metrics")]
use prometheus_client::metrics::gauge::Gauge;
use rand::RngCore;
use sn_protocol::{
storage::{RecordHeader, RecordKind, RecordType},
NetworkAddress, PrettyPrintRecordKey,
@@ -56,6 +62,8 @@ pub struct NodeRecordStore {
record_count_metric: Option<Gauge>,
/// Counting how many times got paid
received_payment_count: usize,
/// Encryption key for the records, randomly generated at node startup
encryption_details: (Aes256GcmSiv, [u8; 64]),
}

/// Configuration for a `DiskBackedRecordStore`.
@@ -87,6 +95,10 @@ impl NodeRecordStore {
network_event_sender: mpsc::Sender<NetworkEvent>,
swarm_cmd_sender: mpsc::Sender<SwarmCmd>,
) -> Self {
let key = Aes256GcmSiv::generate_key(&mut OsRng);
let cipher = Aes256GcmSiv::new(&key);
let mut nonce_starter = [0u8; 64];
OsRng.fill_bytes(&mut nonce_starter);
NodeRecordStore {
local_key: KBucketKey::from(local_id),
config,
@@ -97,6 +109,7 @@
#[cfg(feature = "open-metrics")]
record_count_metric: None,
received_payment_count: 0,
encryption_details: (cipher, nonce_starter),
}
}

@@ -117,26 +130,48 @@ impl NodeRecordStore {
hex_string
}

fn read_from_disk<'a>(key: &Key, storage_dir: &Path) -> Option<Cow<'a, Record>> {
fn read_from_disk<'a>(
encryption_details: &(Aes256GcmSiv, [u8; 64]),
key: &Key,
storage_dir: &Path,
) -> Option<Cow<'a, Record>> {
let (cipher, nonce_starter) = encryption_details;

let start = Instant::now();
let filename = Self::key_to_hex(key);
let file_path = storage_dir.join(&filename);

// We want to end up with a 96 byte nonce, appending the bytes of the address to the nonce_starter
let key_bytes = key.as_ref();

let mut nonce_bytes = nonce_starter.to_vec();
nonce_bytes.extend_from_slice(key_bytes);
// Ensure the final nonce is exactly 96 bytes long by padding or truncating as necessary
// https://crypto.stackexchange.com/questions/26790/how-bad-it-is-using-the-same-iv-twice-with-aes-gcm
nonce_bytes.resize(96, 0);
let nonce = GenericArray::from_slice(&nonce_bytes);

// we should only be reading if we know the record is written to disk properly
match fs::read(file_path) {
Ok(value) => {
Ok(ciphertext) => {
// vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues):
info!(
"Retrieved record from disk! filename: {filename} after {:?}",
start.elapsed()
);
let record = Record {
key: key.clone(),
value,
publisher: None,
expires: None,
};
Some(Cow::Owned(record))

if let Ok(value) = cipher.decrypt(nonce, ciphertext.as_ref()) {
let record = Record {
key: key.clone(),
value,
publisher: None,
expires: None,
};
return Some(Cow::Owned(record));
} else {
error!("Error while decrypting record. filename: {filename}");
None
}
}
Err(err) => {
error!("Error while reading file. filename: {filename}, error: {err:?}");
@@ -252,27 +287,39 @@ impl NodeRecordStore {
let _ = metric.set(self.records.len() as i64);
}

let (cipher, nonce_starter) = self.encryption_details.clone();

let cloned_cmd_sender = self.swarm_cmd_sender.clone();
spawn(async move {
let cmd = match fs::write(&file_path, r.value) {
Ok(_) => {
// vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues):
info!("Wrote record {record_key:?} to disk! filename: {filename}");

SwarmCmd::AddLocalRecordAsStored {
key: r.key,
record_type,
let mut nonce_bytes = nonce_starter.to_vec();
nonce_bytes.extend_from_slice(r.key.as_ref());
// Ensure the final nonce is exactly 96 bytes long by padding or truncating as necessary
// https://crypto.stackexchange.com/questions/26790/how-bad-it-is-using-the-same-iv-twice-with-aes-gcm
nonce_bytes.resize(96, 0);
let nonce = GenericArray::from_slice(&nonce_bytes);
if let Ok(value) = cipher.encrypt(&nonce, r.value.as_ref()) {
let cmd = match fs::write(&file_path, value) {
Ok(_) => {
// vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues):
info!("Wrote record {record_key:?} to disk! filename: {filename}");

SwarmCmd::AddLocalRecordAsStored {
key: r.key,
record_type,
}
}
}
Err(err) => {
error!(
Err(err) => {
error!(
"Error writing record {record_key:?} filename: {filename}, error: {err:?}"
);
SwarmCmd::RemoveFailedLocalRecord { key: r.key }
}
};
SwarmCmd::RemoveFailedLocalRecord { key: r.key }
}
};

send_swarm_cmd(cloned_cmd_sender, cmd);
send_swarm_cmd(cloned_cmd_sender, cmd);
} else {
warn!("Failed to encrypt record {record_key:?} filename: {filename}");
}
});

Ok(())
@@ -341,7 +388,7 @@ impl RecordStore for NodeRecordStore {

debug!("GET request for Record key: {key}");

Self::read_from_disk(k, &self.config.storage_dir)
Self::read_from_disk(&self.encryption_details, k, &self.config.storage_dir)
}

fn put(&mut self, record: Record) -> Result<()> {
@@ -732,8 +779,12 @@ mod tests {
// Confirm the pruned_key got removed, looping to allow async disk ops to complete.
let mut iteration = 0;
while iteration < max_iterations {
if NodeRecordStore::read_from_disk(&pruned_key, &store_config.storage_dir)
.is_none()
if NodeRecordStore::read_from_disk(
&store.encryption_details,
&pruned_key,
&store_config.storage_dir,
)
.is_none()
{
break;
}
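For reference, below is a self-contained sketch of the encrypt-before-write / decrypt-on-get round trip that this commit introduces, outside the record store. The `derive_nonce` helper, its 4-byte random starter, and the sample record bytes are illustrative assumptions rather than the exact values used in the diff; the sketch sizes the derived nonce to the crate's fixed 96-bit (12-byte) `Nonce` length.

```rust
use aes_gcm_siv::{
    aead::{Aead, KeyInit, OsRng},
    Aes256GcmSiv, Nonce,
};
use rand::RngCore;

/// Hypothetical helper: build a per-record nonce from a random per-node
/// starter plus the record key bytes, padded/truncated to the fixed
/// 12-byte nonce length expected by `Aes256GcmSiv`.
fn derive_nonce(starter: &[u8], record_key: &[u8]) -> Nonce {
    let mut bytes = starter.to_vec();
    bytes.extend_from_slice(record_key);
    bytes.resize(12, 0); // exactly 96 bits
    *Nonce::from_slice(&bytes)
}

fn main() {
    // Per-node secrets, generated once at startup.
    let key = Aes256GcmSiv::generate_key(&mut OsRng);
    let cipher = Aes256GcmSiv::new(&key);
    let mut nonce_starter = [0u8; 4];
    OsRng.fill_bytes(&mut nonce_starter);

    // "put": encrypt the record value before it would hit disk.
    let record_key = b"xorname-of-the-record";
    let nonce = derive_nonce(&nonce_starter, record_key);
    let on_disk = cipher
        .encrypt(&nonce, b"record value".as_ref())
        .expect("encryption failed");

    // "get": read the ciphertext back and decrypt with the same nonce.
    let value = cipher
        .decrypt(&nonce, on_disk.as_ref())
        .expect("decryption failed");
    assert_eq!(value, b"record value".to_vec());
}
```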
