mod testnet72;
mod testnet74;
use anyhow::Context;
use futures::StreamExt as _;
use std::path::{Path, PathBuf};
use tracing::instrument;
use cnidarium::{StateDelta, StateRead, StateWrite, Storage};
use jmt::RootHash;
use penumbra_app::{app::StateReadExt, SUBSTORE_PREFIXES};
use penumbra_sct::component::clock::{EpochManager, EpochRead};
use crate::testnet::generate::TestnetConfig;
use flate2::write::GzEncoder;
use flate2::Compression;
use std::fs::File;
/// The set of chain-state migrations that can be performed against an
/// exported node state directory.
pub enum Migration {
    /// No-op migration: performs no changes and returns successfully.
    Noop,
    /// Test migration: writes a sentinel key into state and regenerates
    /// genesis, exercising the migration machinery end to end.
    SimpleMigration,
    /// Testnet-70 migration: rewrites `dex/swap_execution/` entries into
    /// nonverifiable storage (see the match arm in [`Migration::migrate`]).
    Testnet70,
    /// Testnet-72 migration, implemented in the `testnet72` module.
    Testnet72,
    /// Testnet-74 migration, implemented in the `testnet74` module.
    Testnet74,
}
impl Migration {
    /// Runs the selected migration against the node state exported at
    /// `path_to_export`, committing state changes in place and writing a
    /// fresh `genesis.json` and `priv_validator_state.json` for the
    /// post-upgrade chain.
    ///
    /// If `genesis_start` is `None`, the current wall-clock time is used
    /// as the genesis time (a testing convenience).
    pub async fn migrate(
        &self,
        path_to_export: PathBuf,
        genesis_start: Option<tendermint::time::Time>,
    ) -> anyhow::Result<()> {
        match self {
            // Nothing to do.
            Migration::Noop => Ok(()),
            // Test migration: proves the machinery works by writing a
            // sentinel key/value and regenerating genesis around it.
            Migration::SimpleMigration => {
                let rocksdb_dir = path_to_export.join("rocksdb");
                let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
                let export_state = storage.latest_snapshot();
                // Capture the pre-migration app hash for operator visibility.
                let root_hash = export_state.root_hash().await.expect("can get root hash");
                let app_hash_pre_migration: RootHash = root_hash.into();
                let height = export_state
                    .get_block_height()
                    .await
                    .expect("can get block height");
                // The new chain starts one block after the exported height.
                // NOTE(review): variable name contains a typo ("ugprade").
                let post_ugprade_height = height.wrapping_add(1);
                tracing::info!(?app_hash_pre_migration, "app hash pre-upgrade");
                // Stage the state change: write a sentinel key/value pair.
                let mut delta = StateDelta::new(export_state);
                delta.put_raw(
                    "banana".to_string(),
                    "a good fruit (and migration works!)".into(),
                );
                // NOTE(review): stored block height is reset to 0 while the
                // generated genesis below uses `post_ugprade_height` as its
                // initial height — presumably intentional, TODO confirm.
                delta.put_block_height(0u64);
                // Commit in place, overwriting the exported state root.
                let root_hash = storage.commit_in_place(delta).await?;
                let app_hash_post_migration: RootHash = root_hash.into();
                tracing::info!(?app_hash_post_migration, "app hash post upgrade");
                tracing::info!("generating genesis");
                // Re-read the committed state to derive the new genesis.
                let migrated_state = storage.latest_snapshot();
                let root_hash = migrated_state.root_hash().await.expect("can get root hash");
                let app_hash: RootHash = root_hash.into();
                tracing::info!(?root_hash, "root hash from snapshot (post-upgrade)");
                // Carry over only the chain id; the real state is referenced
                // via the checkpointed app hash below, not the genesis content.
                let chain_id = migrated_state.get_chain_id().await?;
                let app_state = penumbra_app::genesis::Content {
                    chain_id,
                    ..Default::default()
                };
                let mut genesis =
                    TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
                genesis.app_hash = app_hash
                    .0
                    .to_vec()
                    .try_into()
                    .expect("infaillible conversion");
                genesis.initial_height = post_ugprade_height as i64;
                // Default the genesis time to "now" for testing setups.
                genesis.genesis_time = genesis_start.unwrap_or_else(|| {
                    let now = tendermint::time::Time::now();
                    tracing::info!(%now, "no genesis time provided, detecting a testing setup");
                    now
                });
                // Checkpoint the genesis against the migrated app hash.
                let checkpoint = app_hash.0.to_vec();
                let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));
                let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
                tracing::info!("genesis: {}", genesis_json);
                let genesis_path = path_to_export.join("genesis.json");
                std::fs::write(genesis_path, genesis_json).expect("can write genesis");
                // Reset the validator signing state for the new chain.
                let validator_state_path = path_to_export.join("priv_validator_state.json");
                let fresh_validator_state =
                    crate::testnet::generate::TestnetValidator::initial_state();
                std::fs::write(validator_state_path, fresh_validator_state)
                    .expect("can write validator state");
                Ok(())
            }
            // Testnet 70: copies every `dex/swap_execution/` entry into
            // nonverifiable storage, then regenerates genesis.
            Migration::Testnet70 => {
                let start_time = std::time::SystemTime::now();
                let rocksdb_dir = path_to_export.join("rocksdb");
                let storage =
                    Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?;
                let export_state = storage.latest_snapshot();
                let root_hash = export_state.root_hash().await.expect("can get root hash");
                let pre_upgrade_root_hash: RootHash = root_hash.into();
                let pre_upgrade_height = export_state
                    .get_block_height()
                    .await
                    .expect("can get block height");
                // The new chain starts one block after the exported height.
                let post_upgrade_height = pre_upgrade_height.wrapping_add(1);
                let mut delta = StateDelta::new(export_state);
                // Stream all swap executions and mirror each one into the
                // nonverifiable (sidecar) store under the same key bytes.
                let prefix_key = "dex/swap_execution/";
                let mut swap_execution_stream = delta.prefix_raw(prefix_key);
                while let Some(r) = swap_execution_stream.next().await {
                    let (key, swap_execution) = r?;
                    tracing::info!("migrating swap execution: {}", key);
                    delta.nonverifiable_put_raw(key.into_bytes(), swap_execution);
                }
                // NOTE(review): stored block height is reset to 0 while the
                // generated genesis below uses `post_upgrade_height` as its
                // initial height — presumably intentional, TODO confirm.
                delta.put_block_height(0u64);
                // Commit in place, overwriting the exported state root.
                let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
                tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");
                let migration_duration = start_time.elapsed().expect("start time not set");
                // Release and reload storage so the snapshot below reflects
                // the committed migration.
                storage.release().await;
                let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
                let migrated_state = storage.latest_snapshot();
                storage.release().await;
                // Carry over only the chain id; the real state is referenced
                // via the checkpointed app hash below.
                let chain_id = migrated_state.get_chain_id().await?;
                let app_state = penumbra_app::genesis::Content {
                    chain_id,
                    ..Default::default()
                };
                let mut genesis =
                    TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
                genesis.app_hash = post_upgrade_root_hash
                    .0
                    .to_vec()
                    .try_into()
                    .expect("infaillible conversion");
                genesis.initial_height = post_upgrade_height as i64;
                // Default the genesis time to "now" for testing setups.
                genesis.genesis_time = genesis_start.unwrap_or_else(|| {
                    let now = tendermint::time::Time::now();
                    tracing::info!(%now, "no genesis time provided, detecting a testing setup");
                    now
                });
                // Checkpoint the genesis against the migrated app hash.
                let checkpoint = post_upgrade_root_hash.0.to_vec();
                let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));
                let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
                tracing::info!("genesis: {}", genesis_json);
                let genesis_path = path_to_export.join("genesis.json");
                std::fs::write(genesis_path, genesis_json).expect("can write genesis");
                // Reset the validator signing state for the new chain.
                let validator_state_path = path_to_export.join("priv_validator_state.json");
                let fresh_validator_state =
                    crate::testnet::generate::TestnetValidator::initial_state();
                std::fs::write(validator_state_path, fresh_validator_state)
                    .expect("can write validator state");
                tracing::info!(
                    pre_upgrade_height,
                    post_upgrade_height,
                    ?pre_upgrade_root_hash,
                    ?post_upgrade_root_hash,
                    duration = migration_duration.as_secs(),
                    "successful migration!"
                );
                Ok(())
            }
            // Later migrations live in their own modules.
            Migration::Testnet72 => testnet72::migrate(path_to_export, genesis_start).await,
            Migration::Testnet74 => testnet74::migrate(path_to_export, genesis_start).await,
        }
    }
}
/// Packages `src_directory` into a gzipped tarball at `archive_filepath`,
/// optionally nesting the contents under `subdir_within_archive` (defaults
/// to the archive root, `"."`).
///
/// # Errors
///
/// Fails if the archive file already exists, cannot be created, or if any
/// step of packaging or finalizing the archive fails.
pub fn archive_directory(
    src_directory: PathBuf,
    archive_filepath: PathBuf,
    subdir_within_archive: Option<String>,
) -> anyhow::Result<()> {
    // Don't overwrite an existing archive.
    if archive_filepath.exists() {
        tracing::error!(
            "export archive filepath already exists: {}",
            archive_filepath.display()
        );
        anyhow::bail!("refusing to overwrite existing archive");
    }
    tracing::info!(
        "creating archive {} -> {}",
        src_directory.display(),
        archive_filepath.display()
    );
    let tarball_file = File::create(&archive_filepath)
        .context("failed to create file for archive: check parent directory and permissions")?;
    let enc = GzEncoder::new(tarball_file, Compression::default());
    let mut tarball = tar::Builder::new(enc);
    let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
    tarball
        .append_dir_all(subdir_within_archive, src_directory.as_path())
        .context("failed to package archive contents")?;
    // Explicitly finalize the tar stream and the gzip encoder. Relying on
    // Drop would silently swallow any I/O error, so a truncated or corrupt
    // archive could otherwise be reported as success.
    let enc = tarball
        .into_inner()
        .context("failed to finalize tar archive")?;
    enc.finish().context("failed to finalize gzip stream")?;
    Ok(())
}
/// Reads the timestamp of the most recently committed block from the
/// rocksdb database under `home`, releasing the storage handle before
/// returning.
pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
    let rocksdb_path = home.join("rocksdb");
    let storage = Storage::load(rocksdb_path, SUBSTORE_PREFIXES.to_vec())
        .await
        .context("error loading store for timestamp")?;
    // Read the timestamp off the latest snapshot, then release the handle.
    let timestamp = storage
        .latest_snapshot()
        .get_block_timestamp()
        .await
        .context("error reading latest block timestamp")?;
    storage.release().await;
    Ok(timestamp)
}
/// Migrates a CometBFT home directory to match a freshly generated
/// post-upgrade genesis file: installs the genesis, bumps the validator
/// signing state to the new initial height, and clears stale consensus data.
#[instrument(skip_all)]
pub async fn migrate_comet_data(
    comet_home: PathBuf,
    new_genesis_file: PathBuf,
) -> anyhow::Result<()> {
    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");

    // Parse the upgraded genesis so we can extract the initial height.
    let genesis_contents =
        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
    let genesis_json: serde_json::Value =
        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
    tracing::info!(?genesis_json, "parsed genesis file");
    // `initial_height` is serialized as a JSON string, not a number.
    let initial_height: u64 = genesis_json["initial_height"]
        .as_str()
        .context("error reading initial_height from genesis file")?
        .parse()?;

    // Install the new genesis into the comet config directory.
    let genesis_file = comet_home.join("config").join("genesis.json");
    tracing::info!(?genesis_file, "writing genesis file to comet config");
    std::fs::write(genesis_file, genesis_contents)
        .context("error writing genesis file to comet config")?;

    // Align the validator signing state, then wipe stale consensus data.
    adjust_priv_validator_state(&comet_home, initial_height)?;
    clear_comet_data(&comet_home)?;
    Ok(())
}
/// Raises the height recorded in `data/priv_validator_state.json` to
/// `initial_height`, resetting round and step, so the validator can sign
/// on the post-upgrade chain. Refuses to proceed (and errors) if the
/// recorded height is already at or beyond `initial_height`.
#[instrument(skip_all)]
fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
    let current_state: serde_json::Value =
        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;
    // `height` is serialized as a JSON string, not a number.
    let current_height = current_state["height"]
        .as_str()
        .context("error reading height from priv_validator_state.json")?
        .parse::<u64>()?;

    // Guard: never move the signing height backwards (or leave it in
    // place) — that would risk double-signing on the new chain.
    if current_height >= initial_height {
        anyhow::bail!(
            "priv_validator_state height {} is already greater than or equal to initial_height {}",
            current_height,
            initial_height
        );
    }

    tracing::info!(
        "increasing height in priv_validator_state from {} to {}",
        current_height,
        initial_height
    );
    let new_state = serde_json::json!({
        "height": initial_height.to_string(), "round": 0,
        "step": 0,
    });
    tracing::info!(?new_state, "updated priv_validator_state.json");
    std::fs::write(
        &priv_validator_state,
        &serde_json::to_string_pretty(&new_state)?,
    )?;
    Ok(())
}
/// Removes stale CometBFT consensus data (`evidence.db`, `state.db`,
/// `blockstore.db`, `cs.wal`) from the `data/` directory so the node can
/// start cleanly on the post-upgrade chain. `priv_validator_state.json`
/// is deliberately left in place; it is adjusted separately.
#[instrument(skip_all)]
fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
    let data_dir = comet_home.join("data");
    for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
        let path = data_dir.join(subdir);
        if path.exists() {
            tracing::info!(?path, "removing comet data");
            // These entries are usually directories (embedded databases /
            // wal directory), but tolerate a plain file too: calling
            // `remove_dir_all` on a file would fail with NotADirectory.
            if path.is_dir() {
                std::fs::remove_dir_all(path)?;
            } else {
                std::fs::remove_file(path)?;
            }
        }
    }
    Ok(())
}