1mod mainnet1;
8mod mainnet2;
9mod mainnet3;
10mod mainnet4;
11mod migrate2;
12mod reset_halt_bit;
13mod simple;
14mod testnet72;
15mod testnet74;
16mod testnet76;
17mod testnet77;
18mod testnet78;
19
20use anyhow::{ensure, Context};
21use penumbra_sdk_governance::StateReadExt;
22use penumbra_sdk_sct::component::clock::EpochRead;
23use std::path::{Path, PathBuf};
24use tracing::instrument;
25
26use migrate2::Migration as MigrationTrait;
27
28use cnidarium::Storage;
29use penumbra_sdk_app::SUBSTORE_PREFIXES;
30
31use flate2::write::GzEncoder;
32use flate2::Compression;
33use std::fs::File;
34
/// Selects which state migration to run against the local `pd` home directory.
///
/// Dispatched by `Migration::migrate_with_params`; any variant without a
/// dispatch arm there currently panics with `unimplemented!`.
#[derive(Debug)]
pub enum Migration {
    /// Resets the chain halt bit so the node can start again
    /// (dispatches to `reset_halt_bit::migrate`; skips comet data migration).
    ReadyToStart,
    /// Minimal example migration (dispatches to `simple::migrate`).
    SimpleMigration,
    /// Testnet 72 upgrade — NOTE(review): module exists but has no dispatch
    /// arm in `migrate_with_params`; selecting it panics with `unimplemented!`.
    Testnet72,
    /// Testnet 74 upgrade — no dispatch arm; currently unimplemented.
    Testnet74,
    /// Testnet 76 upgrade — no dispatch arm; currently unimplemented.
    Testnet76,
    /// Testnet 77 upgrade — no dispatch arm; currently unimplemented.
    Testnet77,
    /// Testnet 78 upgrade — no dispatch arm; currently unimplemented.
    Testnet78,
    /// Mainnet upgrade 1 (dispatches to `mainnet1::migrate`).
    Mainnet1,
    /// Mainnet upgrade 2 (dispatches to `mainnet2::migrate`).
    Mainnet2,
    /// Mainnet upgrade 3 (dispatches to `mainnet3::migrate`).
    Mainnet3,
    /// Mainnet upgrade 4 (dispatches to `mainnet4::migrate`).
    Mainnet4,
    /// Mainnet upgrade 5 — no dispatch arm; currently unimplemented.
    Mainnet5,
    /// IBC client recovery via the `migrate2` trait-based migration; consumes
    /// `params`: old client id, new client id, and an optional app version.
    IbcClientRecovery,
    /// No-op migration via the `migrate2` trait-based migration; consumes an
    /// optional app version from `params`.
    NoOp,
}
88
impl Migration {
    /// Runs this migration with no extra string parameters.
    ///
    /// Convenience wrapper around [`Self::migrate_with_params`] with an empty
    /// `params` vector; see that method for argument semantics.
    #[instrument(skip(pd_home, genesis_start, force))]
    pub async fn migrate(
        &self,
        pd_home: PathBuf,
        comet_home: Option<PathBuf>,
        genesis_start: Option<tendermint::time::Time>,
        force: bool,
    ) -> anyhow::Result<()> {
        self.migrate_with_params(pd_home, comet_home, genesis_start, force, vec![])
            .await
    }

    /// Runs this migration against the rocksdb state under `pd_home`.
    ///
    /// # Arguments
    /// * `pd_home` - pd state directory; rocksdb is expected at `pd_home/rocksdb`.
    /// * `comet_home` - optional CometBFT home directory; when set (and the
    ///   selected migration does not return early), the post-migration genesis
    ///   at `pd_home/genesis.json` is copied there and comet data is reset.
    /// * `genesis_start` - optional genesis start time forwarded to the migration.
    /// * `force` - bypasses both the chain-halt-bit check and the
    ///   version/height consistency check.
    /// * `params` - extra positional string parameters; only consumed by
    ///   `IbcClientRecovery` (old client id, new client id, optional app
    ///   version) and `NoOp` (optional app version).
    ///
    /// # Errors
    /// Fails if the halt bit is unset (without `force`), if the local state
    /// version does not match the recorded block height (without `force`), or
    /// if the underlying migration fails. Panics with `unimplemented!` for
    /// variants that have no dispatch arm below.
    #[instrument(skip(pd_home, genesis_start, force, params))]
    pub async fn migrate_with_params(
        &self,
        pd_home: PathBuf,
        comet_home: Option<PathBuf>,
        genesis_start: Option<tendermint::time::Time>,
        force: bool,
        params: Vec<String>,
    ) -> anyhow::Result<()> {
        tracing::debug!(
            ?pd_home,
            ?genesis_start,
            ?force,
            "preparing to run migration!"
        );
        let rocksdb_dir = pd_home.join("rocksdb");
        let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
        // Refuse to migrate a chain that has not halted, unless explicitly
        // forced: migrating live state is unsafe.
        ensure!(
            storage.latest_snapshot().is_chain_halted().await || force,
            "to run a migration, the chain halt bit must be set to `true` or use the `--force` cli flag"
        );

        // Sanity check: the storage version should agree with the block height
        // recorded in the latest snapshot.
        let latest_version = storage.latest_version();
        let block_height = storage.latest_snapshot().get_block_height().await?;
        ensure!(
            latest_version == block_height || force,
            "local chain state version is corrupted: {} != {}",
            latest_version,
            block_height
        );

        tracing::info!("started migration");

        // Each arm takes ownership of `storage` — either by moving it into the
        // legacy migration function, or by releasing it before a trait-based
        // (`migrate2`) migration runs.
        match self {
            Migration::SimpleMigration => {
                simple::migrate(storage, pd_home.clone(), genesis_start).await?
            }
            Migration::Mainnet1 => {
                mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet2 => {
                mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet3 => {
                mainnet3::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::Mainnet4 => {
                mainnet4::migrate(storage, pd_home.clone(), genesis_start).await?;
            }
            Migration::ReadyToStart => {
                reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
                // Early return: resetting the halt bit intentionally skips the
                // comet data migration below.
                return Ok(());
            }
            Migration::IbcClientRecovery => {
                // Drop our storage handle before the trait-based migration
                // runs; it manages state access itself.
                storage.release().await;
                ensure!(
                    params.len() >= 2,
                    "IBC client recovery requires at least old and new client IDs"
                );
                let old_client_id = params[0].clone();
                let new_client_id = params[1].clone();

                // Optional third parameter: target app version.
                let app_version = if params.len() >= 3 && !params[2].is_empty() {
                    Some(
                        params[2]
                            .parse::<u64>()
                            .context("app_version must be a valid u64")?,
                    )
                } else {
                    None
                };

                let migration = migrate2::ibc_client_recovery::IbcClientRecoveryMigration::new(
                    old_client_id,
                    new_client_id,
                    app_version,
                );
                migration
                    .run(pd_home.clone(), comet_home.clone(), genesis_start)
                    .await?;
                // Early return: `migrate2` migrations handle comet data
                // themselves, so skip the comet migration below.
                return Ok(());
            }
            Migration::NoOp => {
                // Drop our storage handle before the trait-based migration runs.
                storage.release().await;

                // Optional first parameter: target app version.
                let app_version = if !params.is_empty() && !params[0].is_empty() {
                    Some(
                        params[0]
                            .parse::<u64>()
                            .context("app_version must be a valid u64")?,
                    )
                } else {
                    None
                };

                let migration = migrate2::noop::NoOpMigration::new(app_version);
                migration
                    .run(pd_home.clone(), comet_home.clone(), genesis_start)
                    .await?;
                // Early return: skip the comet migration below (handled by the
                // trait-based migration).
                return Ok(());
            }
            _ => unimplemented!("the specified migration is unimplemented"),
        }

        // For legacy migrations that fell through the match: copy the new
        // genesis into the comet home and reset comet's data, if requested.
        if let Some(comet_home) = comet_home {
            let genesis_path = pd_home.join("genesis.json");
            migrate_comet_data(comet_home, genesis_path).await?;
        }

        Ok(())
    }
}
227
228pub fn archive_directory(
231 src_directory: PathBuf,
232 archive_filepath: PathBuf,
233 subdir_within_archive: Option<String>,
234) -> anyhow::Result<()> {
235 if archive_filepath.exists() {
237 tracing::error!(
238 "export archive filepath already exists: {}",
239 archive_filepath.display()
240 );
241 anyhow::bail!("refusing to overwrite existing archive");
242 }
243
244 tracing::info!(
245 "creating archive {} -> {}",
246 src_directory.display(),
247 archive_filepath.display()
248 );
249 let tarball_file = File::create(&archive_filepath)
250 .context("failed to create file for archive: check parent directory and permissions")?;
251 let enc = GzEncoder::new(tarball_file, Compression::default());
252 let mut tarball = tar::Builder::new(enc);
253 let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
254 tarball
255 .append_dir_all(subdir_within_archive, src_directory.as_path())
256 .context("failed to package archive contents")?;
257 Ok(())
258}
259
260pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
262 let rocksdb = home.join("rocksdb");
263 let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
264 .await
265 .context("error loading store for timestamp")?;
266 let state = storage.latest_snapshot();
267 let last_block_time = state
268 .get_current_block_timestamp()
269 .await
270 .context("error reading latest block timestamp")?;
271 storage.release().await;
272 Ok(last_block_time)
273}
274
275#[instrument(skip_all)]
276pub async fn migrate_comet_data(
277 comet_home: PathBuf,
278 new_genesis_file: PathBuf,
279) -> anyhow::Result<()> {
280 tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");
281
282 let genesis_contents =
284 std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
285 let genesis_json: serde_json::Value =
286 serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
287 tracing::info!(?genesis_json, "parsed genesis file");
288 let initial_height = genesis_json["initial_height"]
289 .as_str()
290 .context("error reading initial_height from genesis file")?
291 .parse::<u64>()?;
292
293 let genesis_file = comet_home.join("config").join("genesis.json");
295 tracing::info!(?genesis_file, "writing genesis file to comet config");
296 std::fs::write(genesis_file, genesis_contents)
297 .context("error writing genesis file to comet config")?;
298
299 adjust_priv_validator_state(&comet_home, initial_height)?;
301
302 clear_comet_data(&comet_home)?;
304
305 Ok(())
306}
307
/// Raises the height recorded in `comet_home/data/priv_validator_state.json`
/// to `initial_height`, resetting round and step to 0.
///
/// # Errors
/// Bails if the recorded height is already greater than or equal to
/// `initial_height` — presumably to avoid any risk of re-signing at a height
/// the validator may already have signed (NOTE(review): inferred rationale;
/// confirm against caller expectations). Also fails if the state file cannot
/// be read, parsed, or written.
#[instrument(skip_all)]
fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
    let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
    let current_state: serde_json::Value =
        serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;

    // CometBFT stores the height as a JSON string, so parse it explicitly.
    let current_height = current_state["height"]
        .as_str()
        .context("error reading height from priv_validator_state.json")?
        .parse::<u64>()?;
    if current_height < initial_height {
        tracing::info!(
            "increasing height in priv_validator_state from {} to {}",
            current_height,
            initial_height
        );
        // Write a minimal replacement state: only height/round/step are set;
        // any other fields from the old state are intentionally dropped.
        let new_state = serde_json::json!({
            "height": initial_height.to_string(), "round": 0,
            "step": 0,
        });
        tracing::info!(?new_state, "updated priv_validator_state.json");
        std::fs::write(
            &priv_validator_state,
            &serde_json::to_string_pretty(&new_state)?,
        )?;
    } else {
        anyhow::bail!(
            "priv_validator_state height {} is already greater than or equal to initial_height {}",
            current_height,
            initial_height
        );
    }

    Ok(())
}
344
345#[instrument(skip_all)]
346fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
347 let data_dir = comet_home.join("data");
348
349 for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
354 let path = data_dir.join(subdir);
355 if path.exists() {
356 tracing::info!(?path, "removing file");
357 std::fs::remove_dir_all(path)?;
358 }
359 }
360
361 Ok(())
362}