mod mainnet1;
mod mainnet2;
mod reset_halt_bit;
mod simple;
mod testnet72;
mod testnet74;
mod testnet76;
mod testnet77;
mod testnet78;

use anyhow::{ensure, Context};
use penumbra_sdk_governance::StateReadExt;
use penumbra_sdk_sct::component::clock::EpochRead;
use std::path::{Path, PathBuf};
use tracing::instrument;

use cnidarium::Storage;
use penumbra_sdk_app::SUBSTORE_PREFIXES;

use flate2::write::GzEncoder;
use flate2::Compression;
use std::fs::File;
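
/// A migration that can be run against a node's local chain state.
///
/// `ReadyToStart` resets the chain halt bit so the node can start again; the other
/// variants select a specific upgrade migration to run.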
#[derive(Debug)]
pub enum Migration {
    ReadyToStart,
    SimpleMigration,
    Testnet72,
    Testnet74,
    Testnet76,
    Testnet77,
    Testnet78,
    Mainnet1,
    Mainnet2,
}

impl Migration {
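    /// Run this migration against the node state in `pd_home`.
    ///
    /// Unless `force` is set, this requires that the chain halt bit is set and that the
    /// local state version matches the last committed block height. If `comet_home` is
    /// provided, the CometBFT data directory is also migrated to match the new genesis.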
    #[instrument(skip(pd_home, genesis_start, force))]
    pub async fn migrate(
        &self,
        pd_home: PathBuf,
        comet_home: Option<PathBuf>,
        genesis_start: Option<tendermint::time::Time>,
        force: bool,
    ) -> anyhow::Result<()> {
        tracing::debug!(
            ?pd_home,
            ?genesis_start,
            ?force,
            "preparing to run migration!"
        );
        let rocksdb_dir = pd_home.join("rocksdb");
        let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
        ensure!(
            storage.latest_snapshot().is_chain_halted().await || force,
            "to run a migration, the chain halt bit must be set to `true`, or the `--force` cli flag must be passed"
        );

        // The local state version should always match the latest committed block height.
        let latest_version = storage.latest_version();
        let block_height = storage.latest_snapshot().get_block_height().await?;
        ensure!(
            latest_version == block_height || force,
            "local chain state version is corrupted: {} != {}",
            latest_version,
            block_height
        );

        tracing::info!("started migration");

        // `ReadyToStart` only resets the halt bit; it does not perform a state migration.
        if let Migration::ReadyToStart = self {
            reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
            return Ok(());
        }

        match self {
            Migration::SimpleMigration => {
                simple::migrate(storage, pd_home.clone(), genesis_start).await?
            }
            Migration::Mainnet1 => {
                mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet2 => {
                mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            _ => unimplemented!("the specified migration is unimplemented"),
        }

        // If a CometBFT home was provided, update its state to match the migrated genesis.
        if let Some(comet_home) = comet_home {
            let genesis_path = pd_home.join("genesis.json");
            migrate_comet_data(comet_home, genesis_path).await?;
        }

        Ok(())
    }
}
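
/// Create a gzipped tarball of `src_directory` at `archive_filepath`, optionally nesting
/// its contents under `subdir_within_archive` inside the archive. Refuses to overwrite an
/// existing archive file.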
pub fn archive_directory(
    src_directory: PathBuf,
    archive_filepath: PathBuf,
    subdir_within_archive: Option<String>,
) -> anyhow::Result<()> {
    if archive_filepath.exists() {
        tracing::error!(
            "export archive filepath already exists: {}",
            archive_filepath.display()
        );
        anyhow::bail!("refusing to overwrite existing archive");
    }

    tracing::info!(
        "creating archive {} -> {}",
        src_directory.display(),
        archive_filepath.display()
    );
    let tarball_file = File::create(&archive_filepath)
        .context("failed to create file for archive: check parent directory and permissions")?;
    let enc = GzEncoder::new(tarball_file, Compression::default());
    let mut tarball = tar::Builder::new(enc);
    let subdir_within_archive = subdir_within_archive.unwrap_or_else(|| String::from("."));
    tarball
        .append_dir_all(subdir_within_archive, src_directory.as_path())
        .context("failed to package archive contents")?;
    // Finish the archive and flush the gzip stream explicitly, so that any I/O errors
    // surface here rather than being swallowed when the writers are dropped.
    let encoder = tarball
        .into_inner()
        .context("failed to finalize archive contents")?;
    encoder
        .finish()
        .context("failed to flush archive to disk")?;
    Ok(())
}
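
/// Read the timestamp of the last committed block from the RocksDB state in `home`.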
pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
    let rocksdb = home.join("rocksdb");
    let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
        .await
        .context("error loading store for timestamp")?;
    let state = storage.latest_snapshot();
    let last_block_time = state
        .get_current_block_timestamp()
        .await
        .context("error reading latest block timestamp")?;
    storage.release().await;
    Ok(last_block_time)
}
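
/// Update the CometBFT state in `comet_home` to match a freshly migrated genesis file:
/// the new genesis is copied into the CometBFT config directory, the height recorded in
/// `priv_validator_state.json` is raised to the new initial height, and the old block
/// data is cleared.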
#[instrument(skip_all)]
pub async fn migrate_comet_data(
    comet_home: PathBuf,
    new_genesis_file: PathBuf,
) -> anyhow::Result<()> {
    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");

    // Read the new genesis file and extract its initial height, which is needed to
    // update the validator signing state below.
    let genesis_contents =
        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
    let genesis_json: serde_json::Value =
        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
    tracing::info!(?genesis_json, "parsed genesis file");
    let initial_height = genesis_json["initial_height"]
        .as_str()
        .context("error reading initial_height from genesis file")?
        .parse::<u64>()?;

    // Copy the new genesis into the CometBFT config directory.
    let genesis_file = comet_home.join("config").join("genesis.json");
    tracing::info!(?genesis_file, "writing genesis file to comet config");
    std::fs::write(genesis_file, genesis_contents)
        .context("error writing genesis file to comet config")?;

    adjust_priv_validator_state(&comet_home, initial_height)?;

    clear_comet_data(&comet_home)?;

    Ok(())
}
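
/// Raise the height recorded in `priv_validator_state.json` to `initial_height`, erroring
/// if the recorded height is already at or above the new initial height.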
#[instrument(skip_all)]
fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
    let current_state: serde_json::Value =
        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;

    let current_height = current_state["height"]
        .as_str()
        .context("error reading height from priv_validator_state.json")?
        .parse::<u64>()?;
    if current_height < initial_height {
        tracing::info!(
            "increasing height in priv_validator_state from {} to {}",
            current_height,
            initial_height
        );
        let new_state = serde_json::json!({
            "height": initial_height.to_string(),
            "round": 0,
            "step": 0,
        });
        tracing::info!(?new_state, "updated priv_validator_state.json");
        std::fs::write(
            &priv_validator_state,
            &serde_json::to_string_pretty(&new_state)?,
        )?;
    } else {
        anyhow::bail!(
            "priv_validator_state height {} is already greater than or equal to initial_height {}",
            current_height,
            initial_height
        );
    }

    Ok(())
}
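
/// Remove CometBFT's block store, state, evidence, and consensus WAL data from the
/// `data` directory, so the node restarts from the migrated genesis.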
#[instrument(skip_all)]
fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
    let data_dir = comet_home.join("data");

    // These subdirectories hold CometBFT's block, state, evidence, and consensus WAL data.
    for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
        let path = data_dir.join(subdir);
        if path.exists() {
            tracing::info!(?path, "removing directory");
            std::fs::remove_dir_all(path)?;
        }
    }

    Ok(())
}