1mod mainnet1;
8mod mainnet2;
9mod mainnet3;
10mod mainnet4;
11mod reset_halt_bit;
12mod simple;
13mod testnet72;
14mod testnet74;
15mod testnet76;
16mod testnet77;
17mod testnet78;
18
19use anyhow::{ensure, Context};
20use penumbra_sdk_governance::StateReadExt;
21use penumbra_sdk_sct::component::clock::EpochRead;
22use std::path::{Path, PathBuf};
23use tracing::instrument;
24
25use cnidarium::Storage;
26use penumbra_sdk_app::SUBSTORE_PREFIXES;
27
28use flate2::write::GzEncoder;
29use flate2::Compression;
30use std::fs::File;
31
#[derive(Debug)]
pub enum Migration {
    /// Pseudo-migration: only resets the chain halt bit and returns,
    /// performing no state changes (see the early return in
    /// `Migration::migrate`, which routes to `reset_halt_bit::migrate`).
    ReadyToStart,
    /// Test migration handled by the `simple` module.
    SimpleMigration,
    /// Testnet 72 upgrade. NOTE(review): a `testnet72` module exists, but the
    /// visible `migrate` match routes this variant to the catch-all
    /// `unimplemented!()` arm — confirm whether it should be wired up.
    Testnet72,
    /// Testnet 74 upgrade; currently hits the `unimplemented!()` arm in `migrate`.
    Testnet74,
    /// Testnet 76 upgrade; currently hits the `unimplemented!()` arm in `migrate`.
    Testnet76,
    /// Testnet 77 upgrade; currently hits the `unimplemented!()` arm in `migrate`.
    Testnet77,
    /// Testnet 78 upgrade; currently hits the `unimplemented!()` arm in `migrate`.
    Testnet78,
    /// Mainnet upgrade 1, handled by the `mainnet1` module.
    Mainnet1,
    /// Mainnet upgrade 2, handled by the `mainnet2` module.
    Mainnet2,
    /// Mainnet upgrade 3, handled by the `mainnet3` module.
    Mainnet3,
    /// Mainnet upgrade 4, handled by the `mainnet4` module.
    Mainnet4,
}
74
75impl Migration {
76 #[instrument(skip(pd_home, genesis_start, force))]
77 pub async fn migrate(
78 &self,
79 pd_home: PathBuf,
80 comet_home: Option<PathBuf>,
81 genesis_start: Option<tendermint::time::Time>,
82 force: bool,
83 ) -> anyhow::Result<()> {
84 tracing::debug!(
85 ?pd_home,
86 ?genesis_start,
87 ?force,
88 "preparing to run migration!"
89 );
90 let rocksdb_dir = pd_home.join("rocksdb");
91 let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
92 ensure!(
93 storage.latest_snapshot().is_chain_halted().await || force,
94 "to run a migration, the chain halt bit must be set to `true` or use the `--force` cli flag"
95 );
96
97 let latest_version = storage.latest_version();
99 let block_height = storage.latest_snapshot().get_block_height().await?;
100 ensure!(
101 latest_version == block_height || force,
102 "local chain state version is corrupted: {} != {}",
103 latest_version,
104 block_height
105 );
106
107 tracing::info!("started migration");
108
109 if let Migration::ReadyToStart = self {
111 reset_halt_bit::migrate(storage, pd_home, genesis_start).await?;
112 return Ok(());
113 }
114
115 match self {
116 Migration::SimpleMigration => {
117 simple::migrate(storage, pd_home.clone(), genesis_start).await?
118 }
119 Migration::Mainnet1 => {
120 mainnet1::migrate(storage, pd_home.clone(), genesis_start).await?;
121 }
122 Migration::Mainnet2 => {
123 mainnet2::migrate(storage, pd_home.clone(), genesis_start).await?;
124 }
125 Migration::Mainnet3 => {
126 mainnet3::migrate(storage, pd_home.clone(), genesis_start).await?;
127 }
128 Migration::Mainnet4 => {
129 mainnet4::migrate(storage, pd_home.clone(), genesis_start).await?;
130 }
131 _ => unimplemented!("the specified migration is unimplemented"),
134 }
135
136 if let Some(comet_home) = comet_home {
137 let genesis_path = pd_home.join("genesis.json");
138 migrate_comet_data(comet_home, genesis_path).await?;
139 }
140
141 Ok(())
142 }
143}
144
145pub fn archive_directory(
148 src_directory: PathBuf,
149 archive_filepath: PathBuf,
150 subdir_within_archive: Option<String>,
151) -> anyhow::Result<()> {
152 if archive_filepath.exists() {
154 tracing::error!(
155 "export archive filepath already exists: {}",
156 archive_filepath.display()
157 );
158 anyhow::bail!("refusing to overwrite existing archive");
159 }
160
161 tracing::info!(
162 "creating archive {} -> {}",
163 src_directory.display(),
164 archive_filepath.display()
165 );
166 let tarball_file = File::create(&archive_filepath)
167 .context("failed to create file for archive: check parent directory and permissions")?;
168 let enc = GzEncoder::new(tarball_file, Compression::default());
169 let mut tarball = tar::Builder::new(enc);
170 let subdir_within_archive = subdir_within_archive.unwrap_or(String::from("."));
171 tarball
172 .append_dir_all(subdir_within_archive, src_directory.as_path())
173 .context("failed to package archive contents")?;
174 Ok(())
175}
176
177pub async fn last_block_timestamp(home: PathBuf) -> anyhow::Result<tendermint::Time> {
179 let rocksdb = home.join("rocksdb");
180 let storage = Storage::load(rocksdb, SUBSTORE_PREFIXES.to_vec())
181 .await
182 .context("error loading store for timestamp")?;
183 let state = storage.latest_snapshot();
184 let last_block_time = state
185 .get_current_block_timestamp()
186 .await
187 .context("error reading latest block timestamp")?;
188 storage.release().await;
189 Ok(last_block_time)
190}
191
/// Migrates a CometBFT home directory to follow the upgraded chain:
/// installs `new_genesis_file` as the node's genesis, fast-forwards the
/// validator signing state to the new initial height, and clears stale
/// block data.
#[instrument(skip_all)]
pub async fn migrate_comet_data(
    comet_home: PathBuf,
    new_genesis_file: PathBuf,
) -> anyhow::Result<()> {
    tracing::info!(?comet_home, ?new_genesis_file, "migrating comet data");

    // Parse the new genesis up front to extract `initial_height`, needed to
    // adjust the validator signing state below.
    let genesis_contents =
        std::fs::read_to_string(new_genesis_file).context("error reading new genesis file")?;
    let genesis_json: serde_json::Value =
        serde_json::from_str(&genesis_contents).context("error parsing new genesis file")?;
    // NOTE(review): this logs the full genesis document at info level.
    tracing::info!(?genesis_json, "parsed genesis file");
    // `initial_height` is encoded as a JSON string, hence `as_str` + parse.
    let initial_height = genesis_json["initial_height"]
        .as_str()
        .context("error reading initial_height from genesis file")?
        .parse::<u64>()?;

    // Install the new genesis into the comet config directory.
    let genesis_file = comet_home.join("config").join("genesis.json");
    tracing::info!(?genesis_file, "writing genesis file to comet config");
    std::fs::write(genesis_file, genesis_contents)
        .context("error writing genesis file to comet config")?;

    // Bump the signing-state height so the node can sign at the new
    // initial height.
    adjust_priv_validator_state(&comet_home, initial_height)?;

    // Drop old block/state databases so the node restarts from the
    // migrated state.
    clear_comet_data(&comet_home)?;

    Ok(())
}
224
225#[instrument(skip_all)]
226fn adjust_priv_validator_state(comet_home: &Path, initial_height: u64) -> anyhow::Result<()> {
227 let priv_validator_state = comet_home.join("data").join("priv_validator_state.json");
228 let current_state: serde_json::Value =
229 serde_json::from_str(&std::fs::read_to_string(&priv_validator_state)?)?;
230
231 let current_height = current_state["height"]
232 .as_str()
233 .context("error reading height from priv_validator_state.json")?
234 .parse::<u64>()?;
235 if current_height < initial_height {
236 tracing::info!(
237 "increasing height in priv_validator_state from {} to {}",
238 current_height,
239 initial_height
240 );
241 let new_state = serde_json::json!({
242 "height": initial_height.to_string(), "round": 0,
244 "step": 0,
245 });
246 tracing::info!(?new_state, "updated priv_validator_state.json");
247 std::fs::write(
248 &priv_validator_state,
249 &serde_json::to_string_pretty(&new_state)?,
250 )?;
251 } else {
252 anyhow::bail!(
253 "priv_validator_state height {} is already greater than or equal to initial_height {}",
254 current_height,
255 initial_height
256 );
257 }
258
259 Ok(())
260}
261
262#[instrument(skip_all)]
263fn clear_comet_data(comet_home: &Path) -> anyhow::Result<()> {
264 let data_dir = comet_home.join("data");
265
266 for subdir in &["evidence.db", "state.db", "blockstore.db", "cs.wal"] {
271 let path = data_dir.join(subdir);
272 if path.exists() {
273 tracing::info!(?path, "removing file");
274 std::fs::remove_dir_all(path)?;
275 }
276 }
277
278 Ok(())
279}